// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2019 NXP
 */

#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)
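/*
 * Worked example (values from the kernel's crypto headers): with
 * AES_MAX_KEY_SIZE = 32, CTR_RFC3686_NONCE_SIZE = 4 and
 * SHA512_DIGEST_SIZE = 64, CAAM_MAX_KEY_SIZE = 32 + 4 + 2 * 64 = 164 bytes,
 * enough for the largest split authentication key plus the largest
 * encryption key handled by this driver.
 */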
/*
 * This is a cache of buffers, from which the users of CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This can be added by the dpaa2-eth driver. This would
 *       pose a problem for userspace application processing which cannot
 *       know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
 */
static struct kmem_cache *qi_cache;
struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};
/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};
static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}
/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kmalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}
/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
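/*
 * Illustrative only: a minimal sketch of how the request paths below use the
 * cache. The GFP flags are derived from the crypto request, mirroring the
 * edesc allocation sites in this file; "edesc" here stands for any of the
 * extended-descriptor types carved out of a cache buffer.
 *
 *	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 *		      GFP_KERNEL : GFP_ATOMIC;
 *	struct aead_edesc *edesc = qi_cache_zalloc(GFP_DMA | flags);
 *
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	qi_cache_free(edesc);	// once the request completes or fails
 */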
static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx(container_of(areq, struct aead_request,
						     base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}
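/*
 * Note: the request-context casts above are only safe because every tfm type
 * served by this driver reserves a struct caam_request as its request
 * context; see the crypto_skcipher_set_reqsize()/crypto_aead_set_reqsize()
 * calls in the caam_cra_init_* functions further down in this file.
 */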
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
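/*
 * A sketch of the inl_mask convention used above, assuming the semantics of
 * desc_inline_query() from desc_constr.h: bit 0 reports whether data_len[0]
 * (the padded split authentication key) fits inline in the shared
 * descriptor, bit 1 does the same for data_len[1] (the encryption key).
 * A key that cannot be inlined is referenced through its DMA address
 * instead, which is why both key_virt and key_dma are populated beforehand.
 */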
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto out;

	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
	      aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *	pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *	overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *	pad input S/G, if needed
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  1 + !!ivsize +
				  pad_sg_nents(mapped_src_nents));
	else
		qm_sg_nents = pad_sg_nents(qm_sg_nents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (!mapped_dst_nents) {
		/*
		 * crypto engine requires the output entry to be present when
		 * "frame list" FD is used.
		 * Since engine does not support FMT=2'b11 (unused entry type),
		 * leaving out_fle zeroized is the best option.
		 */
		goto skip_out_fle;
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

skip_out_fle:
	return edesc;
}
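/*
 * Worked example for the padding logic above, assuming pad_sg_nents() rounds
 * up to a multiple of 4 (the engine prefetches 4 S/G entries at a time):
 * with the assoclen entry, an IV and 3 mapped src entries,
 * qm_sg_nents = 1 + 1 + 3 = 5; a separate 2-entry dst S/G then adds
 * pad_sg_nents(2) = 4, so the table holds 5 + 4 = 9 entries and the
 * engine's reads past the 2 real dst entries stay inside the table.
 */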
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
*aead
,
614 unsigned int authsize
)
616 struct caam_ctx
*ctx
= crypto_aead_ctx(aead
);
618 if (authsize
!= POLY1305_DIGEST_SIZE
)
621 ctx
->authsize
= authsize
;
622 return chachapoly_set_sh_desc(aead
);
static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen)
		return -EINVAL;

	ctx->cdata.key_virt = key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}
static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}
static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}
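/*
 * Key layout sketch for rfc4106, per RFC 4106: the caller passes one blob of
 * {AES key, 4-byte salt}. For a 20-byte input:
 *
 *	key[0..15]  -> AES-128 key (ctx->cdata.keylen = 20 - 4 = 16)
 *	key[16..19] -> salt, kept in ctx->key but not counted in keylen
 */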
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}
static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}
static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
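/*
 * Key layout sketch for rfc3686, per RFC 3686: the caller passes
 * {AES key, 4-byte nonce}. For a 20-byte input, keylen becomes 16 once the
 * nonce is stripped, and at run time CONTEXT1[255:128] is assembled as
 * {4-byte nonce, 8-byte IV, 4-byte counter} starting at byte 16 of
 * CONTEXT1, hence ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE = 20.
 */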
static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
				    const u8 *key, unsigned int keylen)
{
	if (keylen != CHACHA_KEY_SIZE)
		return -EINVAL;

	return skcipher_setkey(skcipher, key, keylen, 0);
}
static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}
static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_err(dev, "key size mismatch\n");
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
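/*
 * XTS note: the supplied key is two concatenated AES keys of equal size
 * (data key followed by tweak key), which is why only
 * keylen == 2 * AES_MIN_KEY_SIZE (two AES-128 keys) or
 * keylen == 2 * AES_MAX_KEY_SIZE (two AES-256 keys) is accepted above.
 */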
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);

	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	else
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));

	return edesc;
}
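/*
 * Layout sketch for the table built above, for req->src != req->dst with
 * 2 mapped src and 2 mapped dst entries (dst_sg_idx = 1 + 2 = 3):
 *
 *	sg_table[0]	IV (input)
 *	sg_table[1..2]	src
 *	sg_table[3..4]	dst
 *	sg_table[5]	IV (output; same buffer, mapped bidirectionally)
 *	sg_table[6]	padding from pad_sg_nents()
 *
 * so the engine's 4-entry prefetch never reads past the table.
 */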
static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}
static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}
static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
}
static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
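/*
 * Illustrative detail: the IV copied back above lives right after the HW S/G
 * table inside the edesc, i.e. at (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes.
 * It is the engine-updated IV (last ciphertext block for CBC, next counter
 * block for CTR), which callers such as the CTS template rely on to chain
 * consecutive requests.
 */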
static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	int ret;

	if (!req->cryptlen)
		return 0;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
*req
)
1479 struct skcipher_edesc
*edesc
;
1480 struct crypto_skcipher
*skcipher
= crypto_skcipher_reqtfm(req
);
1481 struct caam_ctx
*ctx
= crypto_skcipher_ctx(skcipher
);
1482 struct caam_request
*caam_req
= skcipher_request_ctx(req
);
1487 /* allocate extended descriptor */
1488 edesc
= skcipher_edesc_alloc(req
);
1490 return PTR_ERR(edesc
);
1492 caam_req
->flc
= &ctx
->flc
[DECRYPT
];
1493 caam_req
->flc_dma
= ctx
->flc_dma
[DECRYPT
];
1494 caam_req
->cbk
= skcipher_decrypt_done
;
1495 caam_req
->ctx
= &req
->base
;
1496 caam_req
->edesc
= edesc
;
1497 ret
= dpaa2_caam_enqueue(ctx
->dev
, caam_req
);
1498 if (ret
!= -EINPROGRESS
&&
1499 !(ret
== -EBUSY
&& req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
)) {
1500 skcipher_unmap(ctx
->dev
, edesc
, req
);
1501 qi_cache_free(edesc
);
static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

	return 0;
}
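/*
 * Mapping sketch: a single DMA mapping covers the flc array and the key,
 * which are laid out contiguously at the start of struct caam_ctx; with
 * NUM_OP operations the addresses decompose as
 *
 *	ctx->flc_dma[i] = dma_addr + i * sizeof(struct caam_flc)
 *	ctx->key_dma    = dma_addr + NUM_OP * sizeof(struct caam_flc)
 *
 * so exactly offsetof(struct caam_ctx, flc_dma) bytes are mapped in one call.
 */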
static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
}
static int caam_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);

	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
			     !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}
static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "chacha20",
				.cra_driver_name = "chacha20-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = chacha20_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = CHACHA_KEY_SIZE,
			.max_keysize = CHACHA_KEY_SIZE,
			.ivsize = CHACHA_IV_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
	},
};
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name
= "authenc-hmac-md5-"
2304 .cra_blocksize
= DES_BLOCK_SIZE
,
2306 .setkey
= aead_setkey
,
2307 .setauthsize
= aead_setauthsize
,
2308 .encrypt
= aead_encrypt
,
2309 .decrypt
= aead_decrypt
,
2310 .ivsize
= DES_BLOCK_SIZE
,
2311 .maxauthsize
= MD5_DIGEST_SIZE
,
2314 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2315 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
2316 OP_ALG_AAI_HMAC_PRECOMP
,
2322 .cra_name
= "echainiv(authenc(hmac(md5),"
2324 .cra_driver_name
= "echainiv-authenc-hmac-md5-"
2326 .cra_blocksize
= DES_BLOCK_SIZE
,
2328 .setkey
= aead_setkey
,
2329 .setauthsize
= aead_setauthsize
,
2330 .encrypt
= aead_encrypt
,
2331 .decrypt
= aead_decrypt
,
2332 .ivsize
= DES_BLOCK_SIZE
,
2333 .maxauthsize
= MD5_DIGEST_SIZE
,
2336 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2337 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
2338 OP_ALG_AAI_HMAC_PRECOMP
,
2345 .cra_name
= "authenc(hmac(sha1),cbc(des))",
2346 .cra_driver_name
= "authenc-hmac-sha1-"
2348 .cra_blocksize
= DES_BLOCK_SIZE
,
2350 .setkey
= aead_setkey
,
2351 .setauthsize
= aead_setauthsize
,
2352 .encrypt
= aead_encrypt
,
2353 .decrypt
= aead_decrypt
,
2354 .ivsize
= DES_BLOCK_SIZE
,
2355 .maxauthsize
= SHA1_DIGEST_SIZE
,
2358 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2359 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
2360 OP_ALG_AAI_HMAC_PRECOMP
,
2366 .cra_name
= "echainiv(authenc(hmac(sha1),"
2368 .cra_driver_name
= "echainiv-authenc-"
2369 "hmac-sha1-cbc-des-caam-qi2",
2370 .cra_blocksize
= DES_BLOCK_SIZE
,
2372 .setkey
= aead_setkey
,
2373 .setauthsize
= aead_setauthsize
,
2374 .encrypt
= aead_encrypt
,
2375 .decrypt
= aead_decrypt
,
2376 .ivsize
= DES_BLOCK_SIZE
,
2377 .maxauthsize
= SHA1_DIGEST_SIZE
,
2380 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2381 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
2382 OP_ALG_AAI_HMAC_PRECOMP
,
2389 .cra_name
= "authenc(hmac(sha224),cbc(des))",
2390 .cra_driver_name
= "authenc-hmac-sha224-"
2392 .cra_blocksize
= DES_BLOCK_SIZE
,
2394 .setkey
= aead_setkey
,
2395 .setauthsize
= aead_setauthsize
,
2396 .encrypt
= aead_encrypt
,
2397 .decrypt
= aead_decrypt
,
2398 .ivsize
= DES_BLOCK_SIZE
,
2399 .maxauthsize
= SHA224_DIGEST_SIZE
,
2402 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2403 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
2404 OP_ALG_AAI_HMAC_PRECOMP
,
2410 .cra_name
= "echainiv(authenc(hmac(sha224),"
2412 .cra_driver_name
= "echainiv-authenc-"
2413 "hmac-sha224-cbc-des-"
2415 .cra_blocksize
= DES_BLOCK_SIZE
,
2417 .setkey
= aead_setkey
,
2418 .setauthsize
= aead_setauthsize
,
2419 .encrypt
= aead_encrypt
,
2420 .decrypt
= aead_decrypt
,
2421 .ivsize
= DES_BLOCK_SIZE
,
2422 .maxauthsize
= SHA224_DIGEST_SIZE
,
2425 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2426 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
2427 OP_ALG_AAI_HMAC_PRECOMP
,
2434 .cra_name
= "authenc(hmac(sha256),cbc(des))",
2435 .cra_driver_name
= "authenc-hmac-sha256-"
2437 .cra_blocksize
= DES_BLOCK_SIZE
,
2439 .setkey
= aead_setkey
,
2440 .setauthsize
= aead_setauthsize
,
2441 .encrypt
= aead_encrypt
,
2442 .decrypt
= aead_decrypt
,
2443 .ivsize
= DES_BLOCK_SIZE
,
2444 .maxauthsize
= SHA256_DIGEST_SIZE
,
2447 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2448 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
2449 OP_ALG_AAI_HMAC_PRECOMP
,
2455 .cra_name
= "echainiv(authenc(hmac(sha256),"
2457 .cra_driver_name
= "echainiv-authenc-"
2458 "hmac-sha256-cbc-des-"
2460 .cra_blocksize
= DES_BLOCK_SIZE
,
2462 .setkey
= aead_setkey
,
2463 .setauthsize
= aead_setauthsize
,
2464 .encrypt
= aead_encrypt
,
2465 .decrypt
= aead_decrypt
,
2466 .ivsize
= DES_BLOCK_SIZE
,
2467 .maxauthsize
= SHA256_DIGEST_SIZE
,
2470 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2471 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
2472 OP_ALG_AAI_HMAC_PRECOMP
,
2479 .cra_name
= "authenc(hmac(sha384),cbc(des))",
2480 .cra_driver_name
= "authenc-hmac-sha384-"
2482 .cra_blocksize
= DES_BLOCK_SIZE
,
2484 .setkey
= aead_setkey
,
2485 .setauthsize
= aead_setauthsize
,
2486 .encrypt
= aead_encrypt
,
2487 .decrypt
= aead_decrypt
,
2488 .ivsize
= DES_BLOCK_SIZE
,
2489 .maxauthsize
= SHA384_DIGEST_SIZE
,
2492 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2493 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
2494 OP_ALG_AAI_HMAC_PRECOMP
,
2500 .cra_name
= "echainiv(authenc(hmac(sha384),"
2502 .cra_driver_name
= "echainiv-authenc-"
2503 "hmac-sha384-cbc-des-"
2505 .cra_blocksize
= DES_BLOCK_SIZE
,
2507 .setkey
= aead_setkey
,
2508 .setauthsize
= aead_setauthsize
,
2509 .encrypt
= aead_encrypt
,
2510 .decrypt
= aead_decrypt
,
2511 .ivsize
= DES_BLOCK_SIZE
,
2512 .maxauthsize
= SHA384_DIGEST_SIZE
,
2515 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2516 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
2517 OP_ALG_AAI_HMAC_PRECOMP
,
2524 .cra_name
= "authenc(hmac(sha512),cbc(des))",
2525 .cra_driver_name
= "authenc-hmac-sha512-"
2527 .cra_blocksize
= DES_BLOCK_SIZE
,
2529 .setkey
= aead_setkey
,
2530 .setauthsize
= aead_setauthsize
,
2531 .encrypt
= aead_encrypt
,
2532 .decrypt
= aead_decrypt
,
2533 .ivsize
= DES_BLOCK_SIZE
,
2534 .maxauthsize
= SHA512_DIGEST_SIZE
,
2537 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2538 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
2539 OP_ALG_AAI_HMAC_PRECOMP
,
2545 .cra_name
= "echainiv(authenc(hmac(sha512),"
2547 .cra_driver_name
= "echainiv-authenc-"
2548 "hmac-sha512-cbc-des-"
2550 .cra_blocksize
= DES_BLOCK_SIZE
,
2552 .setkey
= aead_setkey
,
2553 .setauthsize
= aead_setauthsize
,
2554 .encrypt
= aead_encrypt
,
2555 .decrypt
= aead_decrypt
,
2556 .ivsize
= DES_BLOCK_SIZE
,
2557 .maxauthsize
= SHA512_DIGEST_SIZE
,
2560 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
2561 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
2562 OP_ALG_AAI_HMAC_PRECOMP
,
2569 .cra_name
= "authenc(hmac(md5),"
2570 "rfc3686(ctr(aes)))",
2571 .cra_driver_name
= "authenc-hmac-md5-"
2572 "rfc3686-ctr-aes-caam-qi2",
2575 .setkey
= aead_setkey
,
2576 .setauthsize
= aead_setauthsize
,
2577 .encrypt
= aead_encrypt
,
2578 .decrypt
= aead_decrypt
,
2579 .ivsize
= CTR_RFC3686_IV_SIZE
,
2580 .maxauthsize
= MD5_DIGEST_SIZE
,
2583 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
2584 OP_ALG_AAI_CTR_MOD128
,
2585 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
2586 OP_ALG_AAI_HMAC_PRECOMP
,
2593 .cra_name
= "seqiv(authenc("
2594 "hmac(md5),rfc3686(ctr(aes))))",
2595 .cra_driver_name
= "seqiv-authenc-hmac-md5-"
2596 "rfc3686-ctr-aes-caam-qi2",
2599 .setkey
= aead_setkey
,
2600 .setauthsize
= aead_setauthsize
,
2601 .encrypt
= aead_encrypt
,
2602 .decrypt
= aead_decrypt
,
2603 .ivsize
= CTR_RFC3686_IV_SIZE
,
2604 .maxauthsize
= MD5_DIGEST_SIZE
,
2607 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
2608 OP_ALG_AAI_CTR_MOD128
,
2609 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
2610 OP_ALG_AAI_HMAC_PRECOMP
,
2618 .cra_name
= "authenc(hmac(sha1),"
2619 "rfc3686(ctr(aes)))",
2620 .cra_driver_name
= "authenc-hmac-sha1-"
2621 "rfc3686-ctr-aes-caam-qi2",
2624 .setkey
= aead_setkey
,
2625 .setauthsize
= aead_setauthsize
,
2626 .encrypt
= aead_encrypt
,
2627 .decrypt
= aead_decrypt
,
2628 .ivsize
= CTR_RFC3686_IV_SIZE
,
2629 .maxauthsize
= SHA1_DIGEST_SIZE
,
2632 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
2633 OP_ALG_AAI_CTR_MOD128
,
2634 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
2635 OP_ALG_AAI_HMAC_PRECOMP
,
2642 .cra_name
= "seqiv(authenc("
2643 "hmac(sha1),rfc3686(ctr(aes))))",
2644 .cra_driver_name
= "seqiv-authenc-hmac-sha1-"
2645 "rfc3686-ctr-aes-caam-qi2",
2648 .setkey
= aead_setkey
,
2649 .setauthsize
= aead_setauthsize
,
2650 .encrypt
= aead_encrypt
,
2651 .decrypt
= aead_decrypt
,
2652 .ivsize
= CTR_RFC3686_IV_SIZE
,
2653 .maxauthsize
= SHA1_DIGEST_SIZE
,
2656 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
2657 OP_ALG_AAI_CTR_MOD128
,
2658 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
2659 OP_ALG_AAI_HMAC_PRECOMP
,
2667 .cra_name
= "authenc(hmac(sha224),"
2668 "rfc3686(ctr(aes)))",
2669 .cra_driver_name
= "authenc-hmac-sha224-"
2670 "rfc3686-ctr-aes-caam-qi2",
2673 .setkey
= aead_setkey
,
2674 .setauthsize
= aead_setauthsize
,
2675 .encrypt
= aead_encrypt
,
2676 .decrypt
= aead_decrypt
,
2677 .ivsize
= CTR_RFC3686_IV_SIZE
,
2678 .maxauthsize
= SHA224_DIGEST_SIZE
,
2681 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
2682 OP_ALG_AAI_CTR_MOD128
,
2683 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
2684 OP_ALG_AAI_HMAC_PRECOMP
,
2691 .cra_name
= "seqiv(authenc("
2692 "hmac(sha224),rfc3686(ctr(aes))))",
2693 .cra_driver_name
= "seqiv-authenc-hmac-sha224-"
2694 "rfc3686-ctr-aes-caam-qi2",
2697 .setkey
= aead_setkey
,
2698 .setauthsize
= aead_setauthsize
,
2699 .encrypt
= aead_encrypt
,
2700 .decrypt
= aead_decrypt
,
2701 .ivsize
= CTR_RFC3686_IV_SIZE
,
2702 .maxauthsize
= SHA224_DIGEST_SIZE
,
2705 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
2706 OP_ALG_AAI_CTR_MOD128
,
2707 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
2708 OP_ALG_AAI_HMAC_PRECOMP
,
2716 .cra_name
= "authenc(hmac(sha256),"
2717 "rfc3686(ctr(aes)))",
2718 .cra_driver_name
= "authenc-hmac-sha256-"
2719 "rfc3686-ctr-aes-caam-qi2",
2722 .setkey
= aead_setkey
,
2723 .setauthsize
= aead_setauthsize
,
2724 .encrypt
= aead_encrypt
,
2725 .decrypt
= aead_decrypt
,
2726 .ivsize
= CTR_RFC3686_IV_SIZE
,
2727 .maxauthsize
= SHA256_DIGEST_SIZE
,
2730 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
2731 OP_ALG_AAI_CTR_MOD128
,
2732 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
2733 OP_ALG_AAI_HMAC_PRECOMP
,
2740 .cra_name
= "seqiv(authenc(hmac(sha256),"
2741 "rfc3686(ctr(aes))))",
2742 .cra_driver_name
= "seqiv-authenc-hmac-sha256-"
2743 "rfc3686-ctr-aes-caam-qi2",
2746 .setkey
= aead_setkey
,
2747 .setauthsize
= aead_setauthsize
,
2748 .encrypt
= aead_encrypt
,
2749 .decrypt
= aead_decrypt
,
2750 .ivsize
= CTR_RFC3686_IV_SIZE
,
2751 .maxauthsize
= SHA256_DIGEST_SIZE
,
2754 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
2755 OP_ALG_AAI_CTR_MOD128
,
2756 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
2757 OP_ALG_AAI_HMAC_PRECOMP
,
2765 .cra_name
= "authenc(hmac(sha384),"
2766 "rfc3686(ctr(aes)))",
2767 .cra_driver_name
= "authenc-hmac-sha384-"
2768 "rfc3686-ctr-aes-caam-qi2",
2771 .setkey
= aead_setkey
,
2772 .setauthsize
= aead_setauthsize
,
2773 .encrypt
= aead_encrypt
,
2774 .decrypt
= aead_decrypt
,
2775 .ivsize
= CTR_RFC3686_IV_SIZE
,
2776 .maxauthsize
= SHA384_DIGEST_SIZE
,
2779 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
2780 OP_ALG_AAI_CTR_MOD128
,
2781 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
2782 OP_ALG_AAI_HMAC_PRECOMP
,
2789 .cra_name
= "seqiv(authenc(hmac(sha384),"
2790 "rfc3686(ctr(aes))))",
2791 .cra_driver_name
= "seqiv-authenc-hmac-sha384-"
2792 "rfc3686-ctr-aes-caam-qi2",
2795 .setkey
= aead_setkey
,
2796 .setauthsize
= aead_setauthsize
,
2797 .encrypt
= aead_encrypt
,
2798 .decrypt
= aead_decrypt
,
2799 .ivsize
= CTR_RFC3686_IV_SIZE
,
2800 .maxauthsize
= SHA384_DIGEST_SIZE
,
2803 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
2804 OP_ALG_AAI_CTR_MOD128
,
2805 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
2806 OP_ALG_AAI_HMAC_PRECOMP
,
2814 .cra_name
= "rfc7539(chacha20,poly1305)",
2815 .cra_driver_name
= "rfc7539-chacha20-poly1305-"
2819 .setkey
= chachapoly_setkey
,
2820 .setauthsize
= chachapoly_setauthsize
,
2821 .encrypt
= aead_encrypt
,
2822 .decrypt
= aead_decrypt
,
2823 .ivsize
= CHACHAPOLY_IV_SIZE
,
2824 .maxauthsize
= POLY1305_DIGEST_SIZE
,
2827 .class1_alg_type
= OP_ALG_ALGSEL_CHACHA20
|
2829 .class2_alg_type
= OP_ALG_ALGSEL_POLY1305
|
2837 .cra_name
= "rfc7539esp(chacha20,poly1305)",
2838 .cra_driver_name
= "rfc7539esp-chacha20-"
2839 "poly1305-caam-qi2",
2842 .setkey
= chachapoly_setkey
,
2843 .setauthsize
= chachapoly_setauthsize
,
2844 .encrypt
= aead_encrypt
,
2845 .decrypt
= aead_decrypt
,
2847 .maxauthsize
= POLY1305_DIGEST_SIZE
,
2850 .class1_alg_type
= OP_ALG_ALGSEL_CHACHA20
|
2852 .class2_alg_type
= OP_ALG_ALGSEL_POLY1305
|
2860 .cra_name
= "authenc(hmac(sha512),"
2861 "rfc3686(ctr(aes)))",
2862 .cra_driver_name
= "authenc-hmac-sha512-"
2863 "rfc3686-ctr-aes-caam-qi2",
2866 .setkey
= aead_setkey
,
2867 .setauthsize
= aead_setauthsize
,
2868 .encrypt
= aead_encrypt
,
2869 .decrypt
= aead_decrypt
,
2870 .ivsize
= CTR_RFC3686_IV_SIZE
,
2871 .maxauthsize
= SHA512_DIGEST_SIZE
,
2874 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
2875 OP_ALG_AAI_CTR_MOD128
,
2876 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
2877 OP_ALG_AAI_HMAC_PRECOMP
,
2884 .cra_name
= "seqiv(authenc(hmac(sha512),"
2885 "rfc3686(ctr(aes))))",
2886 .cra_driver_name
= "seqiv-authenc-hmac-sha512-"
2887 "rfc3686-ctr-aes-caam-qi2",
2890 .setkey
= aead_setkey
,
2891 .setauthsize
= aead_setauthsize
,
2892 .encrypt
= aead_encrypt
,
2893 .decrypt
= aead_decrypt
,
2894 .ivsize
= CTR_RFC3686_IV_SIZE
,
2895 .maxauthsize
= SHA512_DIGEST_SIZE
,
2898 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
2899 OP_ALG_AAI_CTR_MOD128
,
2900 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
2901 OP_ALG_AAI_HMAC_PRECOMP
,
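
/*
 * Helpers that fill in the crypto API boilerplate shared by every entry in
 * the template arrays above: module owner, priority, context size and the
 * async/kernel-only flags.
 */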

static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init_skcipher;
	alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init_aead;
	alg->exit = caam_cra_exit_aead;
}
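
/*
 * ahash support. Each transform keeps four shared descriptors (flow
 * contexts) -- UPDATE, UPDATE_FIRST, FINALIZE and DIGEST -- while the
 * per-request state buffers partial blocks and dispatches through the
 * update/final/finup function pointers below.
 */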

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

/**
 * caam_hash_ctx - ahash per-session context
 * @flc: Flow Contexts array
 * @key: authentication key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @dev: dpseci device
 * @ctx_len: size of Context Register
 * @adata: hashing algorithm details
 */
struct caam_hash_ctx {
	struct caam_flc flc[HASH_NUM_OP];
	u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	dma_addr_t flc_dma[HASH_NUM_OP];
	struct device *dev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	struct caam_request caam_req;
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
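
/*
 * caam_export_state is the opaque blob handed out by the crypto API's
 * export()/import() pair; it captures everything needed to resume a
 * partially hashed request: the pending buffer, the running context and
 * the current state-machine callbacks.
 */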

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
				   struct dpaa2_sg_entry *qm_sg,
				   struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(dev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, state->buf_dma)) {
		dev_err(dev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_qm_sg(struct device *dev,
				   struct caam_hash_state *state, int ctx_len,
				   struct dpaa2_sg_entry *qm_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(dev, state->ctx_dma)) {
		dev_err(dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
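
/*
 * Build the four shared descriptors for this tfm and sync them to the
 * device. Note that the "import ctx" argument of cnstr_shdsc_ahash() is
 * true only for UPDATE and FINALIZE, which continue from a previously
 * saved running digest; UPDATE_FIRST and DIGEST start from scratch.
 */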

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	struct caam_flc *flc;
	u32 *desc;

	/* ahash_update shared descriptor */
	flc = &ctx->flc[UPDATE];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	flc = &ctx->flc[UPDATE_FIRST];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_final shared descriptor */
	flc = &ctx->flc[FINALIZE];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_digest shared descriptor */
	flc = &ctx->flc[DIGEST];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}

struct split_key_sh_result {
	struct completion completion;
	int err;
	struct device *dev;
};

static void split_key_sh_done(void *cbk_ctx, u32 err)
{
	struct split_key_sh_result *res = cbk_ctx;

	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
	complete(&res->completion);
}
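
/*
 * Note: hash_digest_key() below reuses the caller's key buffer as both the
 * input and the output of the unkeyed hash, which is why the buffer is
 * mapped DMA_BIDIRECTIONAL.
 */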

/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct caam_request *req_ctx;
	u32 *desc;
	struct split_key_sh_result result;
	dma_addr_t key_dma;
	struct caam_flc *flc;
	dma_addr_t flc_dma;
	int ret = -ENOMEM;
	struct dpaa2_fl_entry *in_fle, *out_fle;

	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
	if (!req_ctx)
		return -ENOMEM;

	in_fle = &req_ctx->fd_flt[1];
	out_fle = &req_ctx->fd_flt[0];

	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
	if (!flc)
		goto err_flc;

	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ctx->dev, key_dma)) {
		dev_err(ctx->dev, "unable to map key memory\n");
		goto err_key_dma;
	}

	desc = flc->sh_desc;

	init_sh_desc(desc, 0);

	/* descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
				 desc_bytes(desc), DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, flc_dma)) {
		dev_err(ctx->dev, "unable to map shared descriptor\n");
		goto err_flc_dma;
	}

	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(in_fle, key_dma);
	dpaa2_fl_set_len(in_fle, *keylen);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, key_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	init_completion(&result.completion);
	result.dev = ctx->dev;

	req_ctx->flc = flc;
	req_ctx->flc_dma = flc_dma;
	req_ctx->cbk = split_key_sh_done;
	req_ctx->ctx = &result;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}

	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
			 DMA_TO_DEVICE);
err_flc_dma:
	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
err_key_dma:
	kfree(flc);
err_flc:
	kfree(req_ctx);

	*keylen = digestsize;

	return ret;
}
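
/*
 * ahash_setkey() follows the standard HMAC rule: a key longer than the
 * algorithm's block size is first digested down to digestsize bytes (via
 * hash_digest_key() above) before the split key is derived from it.
 */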

static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ctx->adata.keylen = keylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);
	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
		goto bad_free_key;

	ctx->adata.key_virt = key;
	ctx->adata.key_inline = true;

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	if (keylen > ctx->adata.keylen_pad) {
		memcpy(ctx->key, key, keylen);
		dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
					   ctx->adata.keylen_pad,
					   DMA_TO_DEVICE);
	}

	ret = ahash_set_sh_desc(ahash);
	kfree(hashed_key);
	return ret;
bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}

static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
			       struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->qm_sg_bytes)
		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
				 DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req);
}
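
/*
 * Completion callbacks. ahash_done() and ahash_done_ctx_src() run after
 * digest/final/finup operations and copy the result out of the context;
 * ahash_done_bi() and ahash_done_ctx_dst() run after (first) updates and
 * stash the trailing partial block back in state->buf for the next request.
 */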

static void ahash_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	req->base.complete(&req->base, ecode);
}

static void ahash_done_bi(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	print_hex_dump_debug("result@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			     crypto_ahash_digestsize(ahash), 1);

	req->base.complete(&req->base, ecode);
}

static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	memcpy(req->result, state->caam_ctx, digestsize);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	req->base.complete(&req->base, ecode);
}

static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	print_hex_dump_debug("result@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			     crypto_ahash_digestsize(ahash), 1);

	req->base.complete(&req->base, ecode);
}
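
/*
 * ahash_update_ctx() only feeds the engine whole blocks: in_len is split
 * into to_hash (a multiple of the block size, sent together with the saved
 * running context) and next_buflen (held back in state->buf until more data
 * arrives).
 */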

static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
			      sizeof(*sg_table);
		sg_table = &edesc->sgt[0];

		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
				       DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_qm_sg_last(req->src, src_len,
					 sg_table + qm_sg_src_index, 0);
		} else {
			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
					   true);
		}

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
		req_ctx->cbk = ahash_done_bi;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc)
		return -ENOMEM;

	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, qm_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;
	qm_sg_src_index = 1 + (buflen ? 1 : 0);
	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
		      sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
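
/*
 * One-shot digest: no running context is imported. The source scatterlist
 * is passed either as a single frame-list entry or, when it maps to more
 * than one DMA segment, through a QM S/G table built in the edesc.
 */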

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to map source for DMA\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}

	edesc->src_nents = src_nents;
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));

	if (mapped_nents > 1) {
		int qm_sg_bytes;
		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];

		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			goto unmap;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	} else {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
	}

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc)
		return ret;

	if (buflen) {
		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
			dev_err(ctx->dev, "unable to map src\n");
			goto unmap;
		}
	}

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	/*
	 * crypto engine requires the input entry to be present when
	 * "frame list" FD is used.
	 * Since engine does not support FMT=2'b11 (unused entry type), leaving
	 * in_fle zeroized (except for "Final" flag) is the best option.
	 */
	if (buflen) {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, state->buf_dma);
		dpaa2_fl_set_len(in_fle, buflen);
	}
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
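
/*
 * The *_no_ctx variants handle requests issued before any data has reached
 * the engine. Once a first chunk is actually hashed (UPDATE_FIRST), the
 * state switches over to the *_ctx handlers, which import the saved
 * running digest.
 */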

static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int qm_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
			      sizeof(*sg_table);
		sg_table = &edesc->sgt[0];

		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
		if (ret)
			goto unmap_ctx;

		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		state->ctx_dma_len = ctx->ctx_len;
		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
						ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
		req_ctx->cbk = ahash_done_ctx_dst;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;
	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
	if (ret)
		goto unmap;

	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		goto unmap;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		goto unmap;

	return ret;
unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return -ENOMEM;
}

static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		sg_table = &edesc->sgt[0];

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_len(in_fle, to_hash);

		if (mapped_nents > 1) {
			int qm_sg_bytes;

			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
			qm_sg_bytes = pad_sg_nents(mapped_nents) *
				      sizeof(*sg_table);
			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
							  qm_sg_bytes,
							  DMA_TO_DEVICE);
			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
				dev_err(ctx->dev, "unable to map S/G table\n");
				ret = -ENOMEM;
				goto unmap_ctx;
			}
			edesc->qm_sg_bytes = qm_sg_bytes;
			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		} else {
			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
		}

		state->ctx_dma_len = ctx->ctx_len;
		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
						ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
		req_ctx->cbk = ahash_done_ctx_dst;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY && req->base.flags &
		      CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
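
/*
 * Hash template: each entry below carries twin name/driver_name fields
 * because, at registration time, it is intended to be expanded into both an
 * unkeyed ahash (e.g. "sha256-caam-qi2") and a keyed hmac variant (e.g.
 * "hmac-sha256-caam-qi2").
 */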
4293 struct caam_hash_template
{
4294 char name
[CRYPTO_MAX_ALG_NAME
];
4295 char driver_name
[CRYPTO_MAX_ALG_NAME
];
4296 char hmac_name
[CRYPTO_MAX_ALG_NAME
];
4297 char hmac_driver_name
[CRYPTO_MAX_ALG_NAME
];
4298 unsigned int blocksize
;
4299 struct ahash_alg template_ahash
;
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam-qi2",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam-qi2",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam-qi2",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam-qi2",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam-qi2",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam-qi2",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam-qi2",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam-qi2",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam-qi2",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam-qi2",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam-qi2",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam-qi2",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}
};

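/*
 * Each template above yields two registrations in dpaa2_caam_probe(): an
 * unkeyed hash built with caam_hash_alloc(dev, alg, false) and a keyed
 * HMAC variant built with caam_hash_alloc(dev, alg, true), e.g. "sha256"
 * and "hmac(sha256)".
 */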
struct caam_hash_alg {
	struct list_head entry;
	struct device *dev;
	int alg_type;
	struct ahash_alg ahash_alg;
};

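/*
 * One-time tfm setup: DMA-map the (split) key buffer and the Flow Context
 * array, pick the MDHA running-digest length for this algorithm from
 * runninglen[], and build the shared descriptors via ahash_set_sh_desc().
 */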
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	int i;

	ctx->dev = caam_hash->dev;

	ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
						  ARRAY_SIZE(ctx->key),
						  DMA_TO_DEVICE,
						  DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
		dev_err(ctx->dev, "unable to map key\n");
		return -ENOMEM;
	}

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
					DMA_BIDIRECTIONAL,
					DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map shared descriptors\n");
		if (ctx->adata.key_dma)
			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_TO_DEVICE,
					       DMA_ATTR_SKIP_CPU_SYNC);
		return -ENOMEM;
	}

	for (i = 0; i < HASH_NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->adata.key_dma)
		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
}

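/*
 * caam_hash_alloc() clones the ahash template into a freshly allocated
 * caam_hash_alg and fills in the generic crypto_alg fields. For the
 * unkeyed variant the ->setkey hook is cleared, so the crypto core
 * treats the algorithm as keyless.
 */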
static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
	struct caam_hash_template *template, bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;
	t_alg->dev = dev;

	return t_alg;
}

static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;

	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
	napi_schedule_irqoff(&ppriv->napi);
}

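/*
 * Response-path plumbing: each online CPU (up to num_pairs of them) gets
 * an affine DPIO, a dequeue store and an FQDAN notification context.
 * When a response FQ turns non-empty, dpaa2_caam_fqdan_cb() above fires
 * and schedules the per-cpu NAPI instance, which drains the FQ from
 * dpaa2_dpseci_poll().
 */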
static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->priv = priv;
		nctx = &ppriv->nctx;
		nctx->is_cdan = 0;
		nctx->id = ppriv->rsp_fqid;
		nctx->desired_cpu = cpu;
		nctx->cb = dpaa2_caam_fqdan_cb;

		/* Register notification callbacks */
		ppriv->dpio = dpaa2_io_service_select(cpu);
		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
		if (unlikely(err)) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
			nctx->cb = NULL;
			/*
			 * If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err;
		}

		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
						     dev);
		if (unlikely(!ppriv->store)) {
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			err = -ENOMEM;
			goto err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return 0;

err:
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->nctx.cb)
			break;
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
	}

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->store)
			break;
		dpaa2_io_store_destroy(ppriv->store);
	}

	return err;
}

static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
					    priv->dev);
		dpaa2_io_store_destroy(ppriv->store);

		if (++i == priv->num_pairs)
			return;
	}
}

static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
{
	struct dpseci_rx_queue_cfg rx_queue_cfg;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i = 0, cpu;

	/* Configure Rx queues */
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);

		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
				       DPSECI_QUEUE_OPT_USER_CTX;
		rx_queue_cfg.order_preservation_en = 0;
		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		/*
		 * Rx priority (WQ) doesn't really matter, since we use
		 * pull mode, i.e. volatile dequeues from specific FQs
		 */
		rx_queue_cfg.dest_cfg.priority = 0;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;

		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
				err);
			return err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return err;
}

static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;

	if (!priv->cscn_mem)
		return;

	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	kfree(priv->cscn_mem);
}

static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);

	dpaa2_dpseci_congestion_free(priv);
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
}

static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
				  const struct dpaa2_fd *fd)
{
	struct caam_request *req;
	u32 fd_err;

	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
		return;
	}

	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
	if (unlikely(fd_err))
		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);

	/*
	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
	 * in FD[ERR] or FD[FRC].
	 */
	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
}

static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	int err;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err == -EBUSY);

	if (unlikely(err))
		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);

	return err;
}

static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	struct dpaa2_dq *dq;
	int cleaned = 0, is_last;

	do {
		dq = dpaa2_io_store_next(ppriv->store, &is_last);
		if (unlikely(!dq)) {
			if (unlikely(!is_last)) {
				dev_dbg(ppriv->priv->dev,
					"FQ %d returned no valid frames\n",
					ppriv->rsp_fqid);
				/*
				 * MUST retry until we get some sort of
				 * valid response token (be it "empty dequeue"
				 * or a valid frame).
				 */
				continue;
			}
			break;
		}

		/* Process FD */
		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
		cleaned++;
	} while (!is_last);

	return cleaned;
}

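/*
 * NAPI poll: volatile-dequeue a batch of response frames into the per-cpu
 * store, consume it, and repeat until either the FQ drains or the budget
 * (minus one store's worth of headroom) is spent; only when fewer than
 * 'budget' frames were cleaned is NAPI completed and the FQDAN re-armed.
 */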
static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct dpaa2_caam_priv *priv;
	int err, cleaned = 0, store_cleaned;

	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
	priv = ppriv->priv;

	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
		return 0;

	do {
		store_cleaned = dpaa2_caam_store_consume(ppriv);
		cleaned += store_cleaned;

		if (store_cleaned == 0 ||
		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
			break;

		/* Try to dequeue some more */
		err = dpaa2_caam_pull_fq(ppriv);
		if (unlikely(err))
			break;
	} while (1);

	if (cleaned < budget) {
		napi_complete_done(napi, cleaned);
		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
		if (unlikely(err))
			dev_err(priv->dev, "Notification rearm failed: %d\n",
				err);
	}

	return cleaned;
}

static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
					 u16 token)
{
	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
	struct device *dev = priv->dev;
	int err;

	/*
	 * Congestion group feature supported starting with DPSECI API v5.1
	 * and only when object has been created with this capability.
	 */
	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
		return 0;

	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
				 GFP_KERNEL | GFP_DMA);
	if (!priv->cscn_mem)
		return -ENOMEM;

	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, priv->cscn_dma)) {
		dev_err(dev, "Error mapping CSCN memory area\n");
		err = -ENOMEM;
		goto err_dma_map;
	}

	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
	cong_notif_cfg.message_ctx = (uintptr_t)priv;
	cong_notif_cfg.message_iova = priv->cscn_dma;
	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
					DPSECI_CGN_MODE_COHERENT_WRITE;

	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
						 &cong_notif_cfg);
	if (err) {
		dev_err(dev, "dpseci_set_congestion_notification failed\n");
		goto err_set_cong;
	}

	return 0;

err_set_cong:
	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
err_dma_map:
	kfree(priv->cscn_mem);

	return err;
}

static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_caam_priv *priv;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, cpu;
	u8 i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpsec_id = ls_dev->obj_desc.id;

	/* Get a handle for the DPSECI this interface is associated with */
	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_open() failed: %d\n", err);
		goto err_open;
	}

	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
				     &priv->minor_ver);
	if (err) {
		dev_err(dev, "dpseci_get_api_version() failed\n");
		goto err_get_vers;
	}

	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);

	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpseci_attr);
	if (err) {
		dev_err(dev, "dpseci_get_attributes() failed\n");
		goto err_get_vers;
	}

	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
				  &priv->sec_attr);
	if (err) {
		dev_err(dev, "dpseci_get_sec_attr() failed\n");
		goto err_get_vers;
	}

	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "setup_congestion() failed\n");
		goto err_get_vers;
	}

	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
			      priv->dpseci_attr.num_tx_queues);
	if (priv->num_pairs > num_online_cpus()) {
		dev_warn(dev, "%d queues won't be used\n",
			 priv->num_pairs - num_online_cpus());
		priv->num_pairs = num_online_cpus();
	}

	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_rx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->tx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_tx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	i = 0;
	for_each_online_cpu(cpu) {
		u8 j;

		j = i % priv->num_pairs;

		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;

		/*
		 * Allow all cores to enqueue, while only some of them
		 * will take part in dequeuing.
		 */
		if (++i > priv->num_pairs)
			continue;

		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
		ppriv->prio = j;

		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
			priv->rx_queue_attr[j].fqid,
			priv->tx_queue_attr[j].fqid);

		ppriv->net_dev.dev = *dev;
		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
			       DPAA2_CAAM_NAPI_WEIGHT);
	}

	return 0;

err_get_rx_queue:
	dpaa2_dpseci_congestion_free(priv);
err_get_vers:
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
	return err;
}

static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_enable(&ppriv->napi);
	}

	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
}

static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int i, err = 0, enabled;

	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_disable() failed\n");
		return err;
	}

	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
	if (err) {
		dev_err(dev, "dpseci_is_enabled() failed\n");
		return err;
	}

	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_disable(&ppriv->napi);
		netif_napi_del(&ppriv->napi);
	}

	return 0;
}

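/* hash algorithms registered at probe time, torn down in dpaa2_caam_remove() */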
static struct list_head hash_list;

static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i, err = 0;
	bool registered = false;

	/*
	 * There is no way to get CAAM endianness - there is no direct register
	 * space access and MC f/w does not provide this attribute.
	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
	 * property.
	 */
	caam_little_end = true;

	caam_imx = false;

	dev = &dpseci_dev->dev;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->domain = iommu_get_domain_for_dev(dev);

	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(dev, "Can't allocate SEC cache\n");
		return -ENOMEM;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
	if (err) {
		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
		goto err_dma_mask;
	}

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");

		goto err_dma_mask;
	}

	priv->ppriv = alloc_percpu(*priv->ppriv);
	if (!priv->ppriv) {
		dev_err(dev, "alloc_percpu() failed\n");
		err = -ENOMEM;
		goto err_alloc_ppriv;
	}

	/* DPSECI initialization */
	err = dpaa2_dpseci_setup(dpseci_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
		goto err_dpseci_setup;
	}

	/* DPIO */
	err = dpaa2_dpseci_dpio_setup(priv);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPSECI binding to DPIO */
	err = dpaa2_dpseci_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
		goto err_bind;
	}

	/* DPSECI enable */
	err = dpaa2_dpseci_enable(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
		goto err_bind;
	}

	dpaa2_dpseci_debugfs_init(priv);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (alg_sel == OP_ALG_ALGSEL_3DES ||
		     alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->skcipher.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
		     c1_alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    c1_alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
		    !priv->sec_attr.ptha_acc_num)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD not supported by device.
		 */
		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
		    !priv->sec_attr.md_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->aead.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}
	if (registered)
		dev_info(dev, "algorithms registered in /proc/crypto\n");

	/* register hash algorithms the device supports */
	INIT_LIST_HEAD(&hash_list);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!priv->sec_attr.md_acc_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* register hmac version */
		t_alg = caam_hash_alloc(dev, alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s hash alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(dev, alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}
	if (!list_empty(&hash_list))
		dev_info(dev, "hash algorithms registered in /proc/crypto\n");

	return err;

err_bind:
	dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
	dpaa2_dpseci_free(priv);
err_dpseci_setup:
	free_percpu(priv->ppriv);
err_alloc_ppriv:
	fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
	kmem_cache_destroy(qi_cache);

	return err;
}

static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpaa2_dpseci_debugfs_exit(priv);

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}

	if (hash_list.next) {
		struct caam_hash_alg *t_hash_alg, *p;

		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}
	}

	dpaa2_dpseci_disable(priv);
	dpaa2_dpseci_dpio_free(priv);
	dpaa2_dpseci_free(priv);
	free_percpu(priv->ppriv);
	fsl_mc_portal_free(priv->mc_io);
	kmem_cache_destroy(qi_cache);

	return 0;
}

int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
	struct dpaa2_fd fd;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i;

	if (IS_ERR(req))
		return PTR_ERR(req);

	if (priv->cscn_mem) {
		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
					DPAA2_CSCN_SIZE,
					DMA_FROM_DEVICE);
		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
			dev_dbg_ratelimited(dev, "Dropping request\n");
			return -EBUSY;
		}
	}

	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);

	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, req->fd_flt_dma)) {
		dev_err(dev, "DMA mapping error for QI enqueue request\n");
		goto err_out;
	}

	memset(&fd, 0, sizeof(fd));
	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
	dpaa2_fd_set_flc(&fd, req->flc_dma);

	ppriv = this_cpu_ptr(priv->ppriv);
	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
						  &fd);
		if (err != -EBUSY)
			break;

		cpu_relax();
	}

	if (unlikely(err)) {
		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
		goto err_out;
	}

	return -EINPROGRESS;

err_out:
	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	return -EIO;
}
EXPORT_SYMBOL(dpaa2_caam_enqueue);

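/*
 * Typical producer-side usage, based on the ahash paths above - a sketch
 * of the request setup, not a complete example:
 *
 *	req_ctx->flc = &ctx->flc[UPDATE_FIRST];	// shared descriptor
 *	req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
 *	req_ctx->cbk = ahash_done_ctx_dst;	// completion callback
 *	req_ctx->ctx = &req->base;
 *	req_ctx->edesc = edesc;
 *
 *	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
 *	// -EINPROGRESS means the frame was enqueued; the callback runs
 *	// later from NAPI context with the FD[FRC] status word.
 */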
static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpseci",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);

static struct fsl_mc_driver dpaa2_caam_driver = {
	.driver = {
		.name		= KBUILD_MODNAME,
		.owner		= THIS_MODULE,
	},
	.probe		= dpaa2_caam_probe,
	.remove		= dpaa2_caam_remove,
	.match_id_table = dpaa2_caam_match_id_table
};

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);