// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2019 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
#include <linux/kernel.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
#include <linux/unaligned.h>

#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)
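
/*
 * A worked example of the sizing above, assuming the usual values of these
 * macros (AES_MAX_KEY_SIZE = 32, CTR_RFC3686_NONCE_SIZE = 4,
 * SHA512_DIGEST_SIZE = 64): 32 + 4 + 64 * 2 = 164 bytes, i.e. room for the
 * largest padded split authentication key followed by the largest AES key
 * and an RFC3686 nonce.
 */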

/*
 * This is a cache of buffers, from which the users of CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This can be added by the dpaa2-eth driver. This would
 *       pose a problem for userspace application processing which cannot
 *       know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
 */
static struct kmem_cache *qi_cache;

struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 * @xts_key_fallback: true if fallback tfm needs to be used due
 *	to unsupported xts key lengths
 * @fallback: xts fallback tfm
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};

static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}

/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kmalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}

/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
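
/*
 * Usage sketch (illustrative only, not part of the driver): callers pair
 * qi_cache_zalloc() with qi_cache_free(), deriving the GFP flags from the
 * crypto request, exactly as the *_edesc_alloc() routines below do:
 *
 *	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 *		      GFP_KERNEL : GFP_ATOMIC;
 *	struct aead_edesc *edesc = qi_cache_zalloc(flags);
 *
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	qi_cache_free(edesc);
 */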

static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx_dma(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx_dma(
			container_of(areq, struct aead_request, base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx_dma(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}
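
/*
 * The casts above are safe because each request type's context is sized for
 * (at least) a struct caam_request - see the crypto_skcipher_set_reqsize_dma()
 * and crypto_aead_set_reqsize_dma() calls in the tfm init functions below
 * (the hash case is handled the same way by the companion hash code) - so the
 * *_request_ctx_dma() helpers all return the per-request caam_request.
 */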

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;
	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
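
/*
 * A sketch of the desc_inline_query() contract as used above, inferred from
 * the decoding in this function: on success it sets one bit per data_len[]
 * entry in inl_mask - bit 0 for data_len[0] (the split authentication key)
 * and bit 1 for data_len[1] (the encryption key) - indicating that the
 * corresponding key still fits inline in the shared descriptor; hence the
 * !!(inl_mask & 1) / !!(inl_mask & 2) decoding for adata/cdata.
 */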

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
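
/*
 * Resulting ctx->key layout for authenc algorithms (the nonce tail exists
 * only for rfc3686 flavours, where it is carried at the end of the
 * encryption key material):
 *
 *	+-----------------------+---------+-------+
 *	| auth key (keylen_pad) | enc key | nonce |
 *	+-----------------------+---------+-------+
 *
 * aead_set_sh_desc() relies on this when it derives cdata.key_virt/key_dma
 * at offset adata.keylen_pad and locates the RFC3686 nonce at the very end.
 */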

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto out;

	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
	      aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx_dma(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *	pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *	overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *	pad input S/G, if needed
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  1 + !!ivsize +
				  pad_sg_nents(mapped_src_nents));
	else
		qm_sg_nents = pad_sg_nents(qm_sg_nents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (!mapped_dst_nents) {
		/*
		 * crypto engine requires the output entry to be present when
		 * "frame list" FD is used.
		 * Since engine does not support FMT=2'b11 (unused entry type),
		 * leaving out_fle zeroized is the best option.
		 */
		goto skip_out_fle;
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

skip_out_fle:
	return edesc;
}
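
/*
 * Input frame layout produced above, matching in_len = 4 + ivsize +
 * req->assoclen + req->cryptlen:
 *
 *	[ assoclen (4 bytes) | IV (optional) | req->src entries ... ]
 *
 * and, when req->dst differs from req->src, the output entries follow at
 * qm_sg_index in the same hardware S/G table.
 */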

static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}
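
/*
 * A sketch of the arithmetic above, assuming CHACHAPOLY_IV_SIZE = 12 and
 * CHACHA_KEY_SIZE = 32: with a full 12-byte IV there is no salt (saltlen = 0,
 * keylen = 32), while an IPsec-style 8-byte IV leaves 12 - 8 = 4 trailing
 * bytes of key material as the salt (keylen = 36). Either way only the
 * 32-byte ChaCha20 key proper is accounted in cdata.keylen.
 */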

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
				    const u8 *key, unsigned int keylen)
{
	if (keylen != CHACHA_KEY_SIZE)
		return -EINVAL;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(dev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
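
/*
 * Two independent reasons route an XTS request to the software fallback: a
 * key length the accelerator does not handle (checked here, at setkey time,
 * via xts_key_fallback) and, on SEC eras <= 8, a sector index whose upper
 * 64 bits are non-zero (checked per request via xts_skcipher_ivsize() in
 * skcipher_encrypt()/skcipher_decrypt() below).
 */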

static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx_dma(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);

	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	else
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));

	return edesc;
}
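
/*
 * The tables built above follow the [IV, src][dst, IV] layout described in
 * the comment near pad_sg_nents(): both IV entries reference the same DMA
 * buffer, mapped bidirectionally, so the engine reads the input IV through
 * the first table and writes the updated IV through the final entry of the
 * second one. That is also why both frame list lengths include ivsize.
 */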

static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_request *caam_req = aead_request_ctx_dma(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_request *caam_req = aead_request_ctx_dma(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
}

static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
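
/*
 * The IV read back above lives right past the hardware S/G table inside the
 * edesc (see skcipher_edesc_alloc()); since that buffer was mapped
 * DMA_BIDIRECTIONAL and the output table's final entry points at it, it now
 * holds the value the engine wrote - the last ciphertext block or counter.
 */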

static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}
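
/*
 * In other words: this returns true when the upper half of the XTS IV (the
 * high 64 bits of the sector index) is non-zero, a case the era <= 8
 * hardware cannot handle and which therefore triggers the fallback path in
 * the encrypt/decrypt entry points below.
 */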

static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx_dma(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_encrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx_dma(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_decrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

	return 0;
}
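
/*
 * Note the layout dependency used above: flc[] and key[] sit at the start of
 * struct caam_ctx, immediately before flc_dma, so a single streaming mapping
 * of offsetof(struct caam_ctx, flc_dma) bytes covers all shared descriptors
 * plus the key, and the per-object DMA handles are derived by offset
 * arithmetic from the one base address.
 */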

static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
	int ret = 0;

	if (alg_aai == OP_ALG_AAI_XTS) {
		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
		struct crypto_skcipher *fallback;

		fallback = crypto_alloc_skcipher(tfm_name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(fallback)) {
			dev_err(caam_alg->caam.dev,
				"Failed to allocate %s fallback: %ld\n",
				tfm_name, PTR_ERR(fallback));
			return PTR_ERR(fallback);
		}

		ctx->fallback = fallback;
		crypto_skcipher_set_reqsize_dma(
			tfm, sizeof(struct caam_request) +
			     crypto_skcipher_reqsize(fallback));
	} else {
		crypto_skcipher_set_reqsize_dma(tfm,
						sizeof(struct caam_request));
	}

	ret = caam_cra_init(ctx, &caam_alg->caam, false);
	if (ret && ctx->fallback)
		crypto_free_skcipher(ctx->fallback);

	return ret;
}

static int caam_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);

	crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam,
			     !caam_alg->caam.nodkp);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	if (ctx->fallback)
		crypto_free_skcipher(ctx->fallback);
	caam_exit_common(ctx);
}

static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx_dma(tfm));
}

static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi2",
				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "chacha20",
				.cra_driver_name = "chacha20-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = chacha20_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = CHACHA_KEY_SIZE,
			.max_keysize = CHACHA_KEY_SIZE,
			.ivsize = CHACHA_IV_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
	},
};
1799 static struct caam_aead_alg driver_aeads
[] = {
1803 .cra_name
= "rfc4106(gcm(aes))",
1804 .cra_driver_name
= "rfc4106-gcm-aes-caam-qi2",
1807 .setkey
= rfc4106_setkey
,
1808 .setauthsize
= rfc4106_setauthsize
,
1809 .encrypt
= ipsec_gcm_encrypt
,
1810 .decrypt
= ipsec_gcm_decrypt
,
1812 .maxauthsize
= AES_BLOCK_SIZE
,
1815 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_GCM
,
1822 .cra_name
= "rfc4543(gcm(aes))",
1823 .cra_driver_name
= "rfc4543-gcm-aes-caam-qi2",
1826 .setkey
= rfc4543_setkey
,
1827 .setauthsize
= rfc4543_setauthsize
,
1828 .encrypt
= ipsec_gcm_encrypt
,
1829 .decrypt
= ipsec_gcm_decrypt
,
1831 .maxauthsize
= AES_BLOCK_SIZE
,
1834 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_GCM
,
1838 /* Galois Counter Mode */
1842 .cra_name
= "gcm(aes)",
1843 .cra_driver_name
= "gcm-aes-caam-qi2",
1846 .setkey
= gcm_setkey
,
1847 .setauthsize
= gcm_setauthsize
,
1848 .encrypt
= aead_encrypt
,
1849 .decrypt
= aead_decrypt
,
1851 .maxauthsize
= AES_BLOCK_SIZE
,
1854 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_GCM
,
1858 /* single-pass ipsec_esp descriptor */
1862 .cra_name
= "authenc(hmac(md5),cbc(aes))",
1863 .cra_driver_name
= "authenc-hmac-md5-"
1865 .cra_blocksize
= AES_BLOCK_SIZE
,
1867 .setkey
= aead_setkey
,
1868 .setauthsize
= aead_setauthsize
,
1869 .encrypt
= aead_encrypt
,
1870 .decrypt
= aead_decrypt
,
1871 .ivsize
= AES_BLOCK_SIZE
,
1872 .maxauthsize
= MD5_DIGEST_SIZE
,
1875 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1876 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
1877 OP_ALG_AAI_HMAC_PRECOMP
,
1883 .cra_name
= "echainiv(authenc(hmac(md5),"
1885 .cra_driver_name
= "echainiv-authenc-hmac-md5-"
1887 .cra_blocksize
= AES_BLOCK_SIZE
,
1889 .setkey
= aead_setkey
,
1890 .setauthsize
= aead_setauthsize
,
1891 .encrypt
= aead_encrypt
,
1892 .decrypt
= aead_decrypt
,
1893 .ivsize
= AES_BLOCK_SIZE
,
1894 .maxauthsize
= MD5_DIGEST_SIZE
,
1897 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1898 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
1899 OP_ALG_AAI_HMAC_PRECOMP
,
1906 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
1907 .cra_driver_name
= "authenc-hmac-sha1-"
1909 .cra_blocksize
= AES_BLOCK_SIZE
,
1911 .setkey
= aead_setkey
,
1912 .setauthsize
= aead_setauthsize
,
1913 .encrypt
= aead_encrypt
,
1914 .decrypt
= aead_decrypt
,
1915 .ivsize
= AES_BLOCK_SIZE
,
1916 .maxauthsize
= SHA1_DIGEST_SIZE
,
1919 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1920 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
1921 OP_ALG_AAI_HMAC_PRECOMP
,
1927 .cra_name
= "echainiv(authenc(hmac(sha1),"
1929 .cra_driver_name
= "echainiv-authenc-"
1930 "hmac-sha1-cbc-aes-caam-qi2",
1931 .cra_blocksize
= AES_BLOCK_SIZE
,
1933 .setkey
= aead_setkey
,
1934 .setauthsize
= aead_setauthsize
,
1935 .encrypt
= aead_encrypt
,
1936 .decrypt
= aead_decrypt
,
1937 .ivsize
= AES_BLOCK_SIZE
,
1938 .maxauthsize
= SHA1_DIGEST_SIZE
,
1941 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1942 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
1943 OP_ALG_AAI_HMAC_PRECOMP
,
1950 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
1951 .cra_driver_name
= "authenc-hmac-sha224-"
1953 .cra_blocksize
= AES_BLOCK_SIZE
,
1955 .setkey
= aead_setkey
,
1956 .setauthsize
= aead_setauthsize
,
1957 .encrypt
= aead_encrypt
,
1958 .decrypt
= aead_decrypt
,
1959 .ivsize
= AES_BLOCK_SIZE
,
1960 .maxauthsize
= SHA224_DIGEST_SIZE
,
1963 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1964 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
1965 OP_ALG_AAI_HMAC_PRECOMP
,
1971 .cra_name
= "echainiv(authenc(hmac(sha224),"
1973 .cra_driver_name
= "echainiv-authenc-"
1974 "hmac-sha224-cbc-aes-caam-qi2",
1975 .cra_blocksize
= AES_BLOCK_SIZE
,
1977 .setkey
= aead_setkey
,
1978 .setauthsize
= aead_setauthsize
,
1979 .encrypt
= aead_encrypt
,
1980 .decrypt
= aead_decrypt
,
1981 .ivsize
= AES_BLOCK_SIZE
,
1982 .maxauthsize
= SHA224_DIGEST_SIZE
,
1985 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
1986 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
1987 OP_ALG_AAI_HMAC_PRECOMP
,
1994 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
1995 .cra_driver_name
= "authenc-hmac-sha256-"
1997 .cra_blocksize
= AES_BLOCK_SIZE
,
1999 .setkey
= aead_setkey
,
2000 .setauthsize
= aead_setauthsize
,
2001 .encrypt
= aead_encrypt
,
2002 .decrypt
= aead_decrypt
,
2003 .ivsize
= AES_BLOCK_SIZE
,
2004 .maxauthsize
= SHA256_DIGEST_SIZE
,
2007 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
2008 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
2009 OP_ALG_AAI_HMAC_PRECOMP
,
2015 .cra_name
= "echainiv(authenc(hmac(sha256),"
2017 .cra_driver_name
= "echainiv-authenc-"
2018 "hmac-sha256-cbc-aes-"
2020 .cra_blocksize
= AES_BLOCK_SIZE
,
2022 .setkey
= aead_setkey
,
2023 .setauthsize
= aead_setauthsize
,
2024 .encrypt
= aead_encrypt
,
2025 .decrypt
= aead_decrypt
,
2026 .ivsize
= AES_BLOCK_SIZE
,
2027 .maxauthsize
= SHA256_DIGEST_SIZE
,
2030 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
2031 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
2032 OP_ALG_AAI_HMAC_PRECOMP
,
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(md5),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha1),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc7539(chacha20,poly1305)",
				.cra_driver_name = "rfc7539-chacha20-poly1305-"
						   "caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = chachapoly_setkey,
			.setauthsize = chachapoly_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CHACHAPOLY_IV_SIZE,
			.maxauthsize = POLY1305_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
					   OP_ALG_AAI_AEAD,
			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
					   OP_ALG_AAI_AEAD,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc7539esp(chacha20,poly1305)",
				.cra_driver_name = "rfc7539esp-chacha20-"
						   "poly1305-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = chachapoly_setkey,
			.setauthsize = chachapoly_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 8,
			.maxauthsize = POLY1305_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
					   OP_ALG_AAI_AEAD,
			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
					   OP_ALG_AAI_AEAD,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};
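
/*
 * The two init helpers below fill in the crypto_alg fields that are common
 * to every algorithm registered by this driver (module owner, priority,
 * context size, flags), so the template array above only needs to carry the
 * per-algorithm data.
 */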
static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_KERN_DRIVER_ONLY);

	alg->init = caam_cra_init_skcipher;
	alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init_aead;
	alg->exit = caam_cra_exit_aead;
}
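
/*
 * Hash (ahash) support starts here. The accelerator keeps the running
 * digest in its Context Register; the driver shuttles that context to and
 * from DMA-able memory (state->caam_ctx) around every job.
 */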

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

/**
 * struct caam_hash_ctx - ahash per-session context
 * @flc: Flow Contexts array
 * @key: authentication key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @dev: dpseci device
 * @ctx_len: size of Context Register
 * @adata: hashing algorithm details
 */
struct caam_hash_ctx {
	struct caam_flc flc[HASH_NUM_OP];
	u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	dma_addr_t flc_dma[HASH_NUM_OP];
	struct device *dev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	struct caam_request caam_req;
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
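
/*
 * caam_export_state mirrors the software-visible part of caam_hash_state,
 * so that a partially hashed request can be exported and later imported,
 * possibly on a different transform instance.
 */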

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
				   struct dpaa2_sg_entry *qm_sg,
				   struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(dev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, state->buf_dma)) {
		dev_err(dev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_qm_sg(struct device *dev,
				   struct caam_hash_state *state, int ctx_len,
				   struct dpaa2_sg_entry *qm_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(dev, state->ctx_dma)) {
		dev_err(dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
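
/*
 * Both mapping helpers above record the freshly mapped address in the state,
 * so the completion path (ahash_unmap()/ahash_unmap_ctx() below) can undo
 * the mapping once the hardware is done with the job.
 */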

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	struct caam_flc *flc;
	u32 *desc;

	/* ahash_update shared descriptor */
	flc = &ctx->flc[UPDATE];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	flc = &ctx->flc[UPDATE_FIRST];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_final shared descriptor */
	flc = &ctx->flc[FINALIZE];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_digest shared descriptor */
	flc = &ctx->flc[DIGEST];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}
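
/*
 * The structure and callback below let setkey wait synchronously for a
 * key-digesting job: the completion is signalled from the dpseci response
 * path, while the caller sleeps in hash_digest_key().
 */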
struct split_key_sh_result {
	struct completion completion;
	int err;
	struct device *dev;
};

static void split_key_sh_done(void *cbk_ctx, u32 err)
{
	struct split_key_sh_result *res = cbk_ctx;

	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;

	complete(&res->completion);
}

/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct caam_request *req_ctx;
	u32 *desc;
	struct split_key_sh_result result;
	dma_addr_t key_dma;
	struct caam_flc *flc;
	dma_addr_t flc_dma;
	int ret = -ENOMEM;
	struct dpaa2_fl_entry *in_fle, *out_fle;

	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL);
	if (!req_ctx)
		return -ENOMEM;

	in_fle = &req_ctx->fd_flt[1];
	out_fle = &req_ctx->fd_flt[0];

	flc = kzalloc(sizeof(*flc), GFP_KERNEL);
	if (!flc)
		goto err_flc;

	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ctx->dev, key_dma)) {
		dev_err(ctx->dev, "unable to map key memory\n");
		goto err_key_dma;
	}

	desc = flc->sh_desc;

	init_sh_desc(desc, 0);

	/* descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
				 desc_bytes(desc), DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, flc_dma)) {
		dev_err(ctx->dev, "unable to map shared descriptor\n");
		goto err_flc_dma;
	}

	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(in_fle, key_dma);
	dpaa2_fl_set_len(in_fle, *keylen);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, key_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);
	result.dev = ctx->dev;

	req_ctx->flc = flc;
	req_ctx->flc_dma = flc_dma;
	req_ctx->cbk = split_key_sh_done;
	req_ctx->ctx = &result;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}

	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
			 DMA_TO_DEVICE);
err_flc_dma:
	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
err_key_dma:
	kfree(flc);
err_flc:
	kfree(req_ctx);

	*keylen = digestsize;

	return ret;
}
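
/*
 * Keys longer than the block size are pre-digested via hash_digest_key()
 * above, matching the standard HMAC key-reduction step, before the split
 * key is derived by the DKP protocol in the shared descriptor.
 */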
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);

	if (keylen > blocksize) {
		unsigned int aligned_len =
			ALIGN(keylen, dma_get_cache_alignment());

		if (aligned_len < keylen)
			return -EOVERFLOW;

		hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ctx->adata.keylen = keylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);
	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
		goto bad_free_key;

	ctx->adata.key_virt = key;
	ctx->adata.key_inline = true;

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	if (keylen > ctx->adata.keylen_pad) {
		memcpy(ctx->key, key, keylen);
		dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
					   ctx->adata.keylen_pad,
					   DMA_TO_DEVICE);
	}

	ret = ahash_set_sh_desc(ahash);
	kfree(hashed_key);
	return ret;
bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}

static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
			       struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->qm_sg_bytes)
		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
				 DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req);
}
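
/*
 * Four completion callbacks follow. They differ only in the DMA direction
 * of the context unmap and in whether the digest is copied to req->result
 * or the running buffer is refilled for the next update.
 */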
static void ahash_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	ahash_request_complete(req, ecode);
}

static void ahash_done_bi(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     crypto_ahash_digestsize(ahash), 1);

	ahash_request_complete(req, ecode);
}

static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	memcpy(req->result, state->caam_ctx, digestsize);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	ahash_request_complete(req, ecode);
}

static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     crypto_ahash_digestsize(ahash), 1);

	ahash_request_complete(req, ecode);
}
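
/*
 * ahash_update_ctx() feeds the running context and any buffered partial
 * block through one S/G table; input is hashed only in multiples of the
 * block size, with the remainder stashed in state->buf for the next call.
 */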
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
			      sizeof(*sg_table);
		sg_table = &edesc->sgt[0];

		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
				       DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_qm_sg_last(req->src, src_len,
					 sg_table + qm_sg_src_index, 0);
		} else {
			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
					   true);
		}

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
		req_ctx->cbk = ahash_done_bi;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc)
		return -ENOMEM;

	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, qm_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;
	qm_sg_src_index = 1 + (buflen ? 1 : 0);
	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
		      sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to map source for DMA\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}

	edesc->src_nents = src_nents;
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));

	if (mapped_nents > 1) {
		int qm_sg_bytes;
		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];

		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			goto unmap;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	} else {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
	}

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
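
/*
 * The *_no_ctx variants below run while no hardware context exists yet,
 * i.e. before the first full block has been hashed; they use the DIGEST
 * and UPDATE_FIRST flow contexts instead of UPDATE / FINALIZE.
 */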
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc)
		return ret;

	if (buflen) {
		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
			dev_err(ctx->dev, "unable to map src\n");
			goto unmap;
		}
	}

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	/*
	 * crypto engine requires the input entry to be present when
	 * "frame list" FD is used.
	 * Since engine does not support FMT=2'b11 (unused entry type), leaving
	 * in_fle zeroized (except for "Final" flag) is the best option.
	 */
	if (buflen) {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, state->buf_dma);
		dpaa2_fl_set_len(in_fle, buflen);
	}
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int qm_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
			      sizeof(*sg_table);
		sg_table = &edesc->sgt[0];

		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
		if (ret)
			goto unmap_ctx;

		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		state->ctx_dma_len = ctx->ctx_len;
		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
						ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
		req_ctx->cbk = ahash_done_ctx_dst;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret = -ENOMEM;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}

	edesc->src_nents = src_nents;
	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
	if (ret)
		goto unmap;

	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		ret = -ENOMEM;
		goto unmap;
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		goto unmap;

	return ret;
unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		sg_table = &edesc->sgt[0];

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_len(in_fle, to_hash);

		if (mapped_nents > 1) {
			int qm_sg_bytes;

			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
			qm_sg_bytes = pad_sg_nents(mapped_nents) *
				      sizeof(*sg_table);
			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
							  qm_sg_bytes,
							  DMA_TO_DEVICE);
			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
				dev_err(ctx->dev, "unable to map S/G table\n");
				ret = -ENOMEM;
				goto unmap_ctx;
			}
			edesc->qm_sg_bytes = qm_sg_bytes;
			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		} else {
			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
		}

		state->ctx_dma_len = ctx->ctx_len;
		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
						ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
		req_ctx->cbk = ahash_done_ctx_dst;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY && req->base.flags &
		      CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
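/*
 * ahash_update_first() only sends whole blocks to the engine; any trailing
 * partial block (*next_buflen) is copied into state->buf and folded into the
 * next operation, so the hardware never sees a non-block-aligned update.
 */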
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}
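/*
 * The update/finup/final hooks installed here form a small state machine:
 * ahash_update_first() switches to the *_ctx handlers once a running context
 * exists in the engine, or to the *_no_ctx handlers while the data is still
 * only buffered on the CPU side.
 */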
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
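/*
 * Export/import serialize the software-visible part of the hash state (the
 * partial-block buffer, the running context snapshot and the state machine
 * hooks) into a caam_export_state, which is what .statesize advertises to
 * the crypto API in the templates below.
 */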
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam-qi2",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam-qi2",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam-qi2",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam-qi2",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam-qi2",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam-qi2",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam-qi2",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam-qi2",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam-qi2",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam-qi2",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam-qi2",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam-qi2",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}
};
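/*
 * Each template above is registered twice at probe time: once as the keyed
 * hmac(...) variant and once as the plain hash, with caam_hash_alloc()
 * picking the matching names and dropping .setkey for the unkeyed case.
 */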
struct caam_hash_alg {
	struct list_head entry;
	struct device *dev;
	int alg_type;
	bool is_hmac;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	int i;

	ctx->dev = caam_hash->dev;

	if (caam_hash->is_hmac) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  DMA_TO_DEVICE,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
			dev_err(ctx->dev, "unable to map key\n");
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
					DMA_BIDIRECTIONAL,
					DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map shared descriptors\n");
		if (ctx->adata.key_dma)
			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_TO_DEVICE,
					       DMA_ATTR_SKIP_CPU_SYNC);
		return -ENOMEM;
	}

	for (i = 0; i < HASH_NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->adata.key_dma)
		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
}
static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
					     struct caam_hash_template *template,
					     bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
		t_alg->is_hmac = true;
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
		t_alg->is_hmac = false;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;
	t_alg->dev = dev;

	return t_alg;
}
static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;

	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
	napi_schedule_irqoff(&ppriv->napi);
}
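/*
 * FQDAN (frame queue data availability notification) handling is deliberately
 * minimal: the hard-IRQ context only schedules NAPI, and all dequeue work
 * happens later in dpaa2_dpseci_poll() via volatile (pull-mode) dequeues.
 */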
static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->priv = priv;
		nctx = &ppriv->nctx;
		nctx->is_cdan = 0;
		nctx->id = ppriv->rsp_fqid;
		nctx->desired_cpu = cpu;
		nctx->cb = dpaa2_caam_fqdan_cb;

		/* Register notification callbacks */
		ppriv->dpio = dpaa2_io_service_select(cpu);
		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
		if (unlikely(err)) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
			nctx->cb = NULL;
			/*
			 * If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err;
		}

		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
						     dev);
		if (unlikely(!ppriv->store)) {
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			err = -ENOMEM;
			goto err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return 0;

err:
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->nctx.cb)
			break;
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
	}

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->store)
			break;
		dpaa2_io_store_destroy(ppriv->store);
	}

	return err;
}
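/*
 * Note: the error unwinding above relies on nctx->cb and ppriv->store acting
 * as "initialized" markers; the rollback loops stop at the first per-CPU slot
 * that was never set up.
 */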
static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
					    priv->dev);
		dpaa2_io_store_destroy(ppriv->store);

		if (++i == priv->num_pairs)
			return;
	}
}
static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
{
	struct dpseci_rx_queue_cfg rx_queue_cfg;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i = 0, cpu;

	/* Configure Rx queues */
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);

		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
				       DPSECI_QUEUE_OPT_USER_CTX;
		rx_queue_cfg.order_preservation_en = 0;
		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		/*
		 * Rx priority (WQ) doesn't really matter, since we use
		 * pull mode, i.e. volatile dequeues from specific FQs
		 */
		rx_queue_cfg.dest_cfg.priority = 0;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;

		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
				err);
			return err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return err;
}
static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;

	if (!priv->cscn_mem)
		return;

	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	kfree(priv->cscn_mem);
}
static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int err;

	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err)
			dev_err(dev, "dpseci_reset() failed\n");
	}

	dpaa2_dpseci_congestion_free(priv);
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
}
static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
				  const struct dpaa2_fd *fd)
{
	struct caam_request *req;
	u32 fd_err;

	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
		return;
	}

	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
	if (unlikely(fd_err))
		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);

	/*
	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
	 * in FD[ERR] or FD[FRC].
	 */
	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
}
static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	int err;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err == -EBUSY);

	if (unlikely(err))
		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);

	return err;
}
static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	struct dpaa2_dq *dq;
	int cleaned = 0, is_last;

	do {
		dq = dpaa2_io_store_next(ppriv->store, &is_last);
		if (unlikely(!dq)) {
			if (unlikely(!is_last)) {
				dev_dbg(ppriv->priv->dev,
					"FQ %d returned no valid frames\n",
					ppriv->rsp_fqid);
				/*
				 * MUST retry until we get some sort of
				 * valid response token (be it "empty dequeue"
				 * or a valid frame).
				 */
				continue;
			}
			break;
		}

		/* Process FD */
		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
		cleaned++;
	} while (!is_last);

	return cleaned;
}
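/*
 * dpaa2_io_store_next() returns NULL both for "token not yet written by
 * hardware" and for the final "empty dequeue" token; is_last is what
 * disambiguates the two, hence the retry-until-token loop above.
 */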
static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct dpaa2_caam_priv *priv;
	int err, cleaned = 0, store_cleaned;

	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
	priv = ppriv->priv;

	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
		return 0;

	do {
		store_cleaned = dpaa2_caam_store_consume(ppriv);
		cleaned += store_cleaned;

		if (store_cleaned == 0 ||
		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
			break;

		/* Try to dequeue some more */
		err = dpaa2_caam_pull_fq(ppriv);
		if (unlikely(err))
			break;
	} while (1);

	if (cleaned < budget) {
		napi_complete_done(napi, cleaned);
		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
		if (unlikely(err))
			dev_err(priv->dev, "Notification rearm failed: %d\n",
				err);
	}

	return cleaned;
}
static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
					 u16 token)
{
	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
	struct device *dev = priv->dev;
	unsigned int alignmask;
	int err;

	/*
	 * Congestion group feature supported starting with DPSECI API v5.1
	 * and only when object has been created with this capability.
	 */
	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
		return 0;

	alignmask = DPAA2_CSCN_ALIGN - 1;
	alignmask |= dma_get_cache_alignment() - 1;
	priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1),
				 GFP_KERNEL);
	if (!priv->cscn_mem)
		return -ENOMEM;

	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem,
					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, priv->cscn_dma)) {
		dev_err(dev, "Error mapping CSCN memory area\n");
		err = -ENOMEM;
		goto err_dma_map;
	}

	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
	cong_notif_cfg.message_ctx = (uintptr_t)priv;
	cong_notif_cfg.message_iova = priv->cscn_dma;
	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
					   DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
					   DPSECI_CGN_MODE_COHERENT_WRITE;

	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
						 &cong_notif_cfg);
	if (err) {
		dev_err(dev, "dpseci_set_congestion_notification failed\n");
		goto err_set_cong;
	}

	return 0;

err_set_cong:
	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
err_dma_map:
	kfree(priv->cscn_mem);

	return err;
}
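/*
 * The CSCN (congestion state change notification) area set up above is
 * written by hardware on congestion entry/exit; dpaa2_caam_enqueue() syncs
 * and inspects it to shed load before enqueuing new requests.
 */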
static void free_dpaa2_pcpu_netdev(struct dpaa2_caam_priv *priv,
				   const cpumask_t *cpus)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i;

	for_each_cpu(i, cpus) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		free_netdev(ppriv->net_dev);
	}
}
static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_caam_priv *priv;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	cpumask_var_t clean_mask;
	int err, cpu;
	u8 i;

	err = -ENOMEM;
	if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
		goto err_cpumask;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpsec_id = ls_dev->obj_desc.id;

	/* Get a handle for the DPSECI this interface is associate with */
	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_open() failed: %d\n", err);
		goto err_open;
	}

	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
				     &priv->minor_ver);
	if (err) {
		dev_err(dev, "dpseci_get_api_version() failed\n");
		goto err_get_vers;
	}

	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);

	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err) {
			dev_err(dev, "dpseci_reset() failed\n");
			goto err_get_vers;
		}
	}

	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpseci_attr);
	if (err) {
		dev_err(dev, "dpseci_get_attributes() failed\n");
		goto err_get_vers;
	}

	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
				  &priv->sec_attr);
	if (err) {
		dev_err(dev, "dpseci_get_sec_attr() failed\n");
		goto err_get_vers;
	}

	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "setup_congestion() failed\n");
		goto err_get_vers;
	}

	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
			      priv->dpseci_attr.num_tx_queues);
	if (priv->num_pairs > num_online_cpus()) {
		dev_warn(dev, "%d queues won't be used\n",
			 priv->num_pairs - num_online_cpus());
		priv->num_pairs = num_online_cpus();
	}

	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_rx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->tx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_tx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	i = 0;
	for_each_online_cpu(cpu) {
		u8 j;

		j = i % priv->num_pairs;

		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;

		/*
		 * Allow all cores to enqueue, while only some of them
		 * will take part in dequeuing.
		 */
		if (++i > priv->num_pairs)
			continue;

		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
		ppriv->prio = j;

		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
			priv->rx_queue_attr[j].fqid,
			priv->tx_queue_attr[j].fqid);

		ppriv->net_dev = alloc_netdev_dummy(0);
		if (!ppriv->net_dev) {
			err = -ENOMEM;
			goto err_alloc_netdev;
		}
		cpumask_set_cpu(cpu, clean_mask);
		ppriv->net_dev->dev = *dev;

		netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
					 dpaa2_dpseci_poll,
					 DPAA2_CAAM_NAPI_WEIGHT);
	}

	err = 0;
	goto free_cpumask;

err_alloc_netdev:
	free_dpaa2_pcpu_netdev(priv, clean_mask);
err_get_rx_queue:
	dpaa2_dpseci_congestion_free(priv);
err_get_vers:
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
free_cpumask:
	free_cpumask_var(clean_mask);
err_cpumask:
	return err;
}
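/*
 * Queue pairs are distributed round-robin (j = i % num_pairs): every online
 * CPU gets a Tx FQ for enqueue, but only the first num_pairs CPUs own an Rx
 * FQ, a dequeue store and a NAPI instance.
 */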
static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_enable(&ppriv->napi);
	}

	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
}
static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int i, err = 0, enabled;

	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_disable() failed\n");
		return err;
	}

	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
	if (err) {
		dev_err(dev, "dpseci_is_enabled() failed\n");
		return err;
	}

	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_disable(&ppriv->napi);
		netif_napi_del(&ppriv->napi);
		free_netdev(ppriv->net_dev);
	}

	return 0;
}
static struct list_head hash_list;

static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i, err = 0;
	bool registered = false;

	/*
	 * There is no way to get CAAM endianness - there is no direct register
	 * space access and MC f/w does not provide this attribute.
	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
	 * property.
	 */
	caam_little_end = true;

	caam_imx = false;

	dev = &dpseci_dev->dev;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->domain = iommu_get_domain_for_dev(dev);

	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     dma_get_cache_alignment(), 0, NULL);
	if (!qi_cache) {
		dev_err(dev, "Can't allocate SEC cache\n");
		return -ENOMEM;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
	if (err) {
		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
		goto err_dma_mask;
	}

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");

		goto err_dma_mask;
	}

	priv->ppriv = alloc_percpu(*priv->ppriv);
	if (!priv->ppriv) {
		dev_err(dev, "alloc_percpu() failed\n");
		err = -ENOMEM;
		goto err_alloc_ppriv;
	}

	/* DPSECI initialization */
	err = dpaa2_dpseci_setup(dpseci_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
		goto err_dpseci_setup;
	}

	/* DPIO */
	err = dpaa2_dpseci_dpio_setup(priv);
	if (err) {
		dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPSECI binding to DPIO */
	err = dpaa2_dpseci_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
		goto err_bind;
	}

	/* DPSECI enable */
	err = dpaa2_dpseci_enable(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
		goto err_bind;
	}

	dpaa2_dpseci_debugfs_init(priv);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (alg_sel == OP_ALG_ALGSEL_3DES ||
		     alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->skcipher.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
		     c1_alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    c1_alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
		    !priv->sec_attr.ptha_acc_num)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD not supported by device.
		 */
		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
		    !priv->sec_attr.md_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->aead.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}
	if (registered)
		dev_info(dev, "algorithms registered in /proc/crypto\n");

	/* register hash algorithms the device supports */
	INIT_LIST_HEAD(&hash_list);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!priv->sec_attr.md_acc_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* register hmac version */
		t_alg = caam_hash_alloc(dev, alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s hash alg allocation failed: %d\n",
				 alg->hmac_driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(dev, alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}
	if (!list_empty(&hash_list))
		dev_info(dev, "hash algorithms registered in /proc/crypto\n");

	return err;

err_bind:
	dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
	dpaa2_dpseci_free(priv);
err_dpseci_setup:
	free_percpu(priv->ppriv);
err_alloc_ppriv:
	fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
	kmem_cache_destroy(qi_cache);

	return err;
}
static void __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpaa2_dpseci_debugfs_exit(priv);

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}

	if (hash_list.next) {
		struct caam_hash_alg *t_hash_alg, *p;

		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}
	}

	dpaa2_dpseci_disable(priv);
	dpaa2_dpseci_dpio_free(priv);
	dpaa2_dpseci_free(priv);
	free_percpu(priv->ppriv);
	fsl_mc_portal_free(priv->mc_io);
	kmem_cache_destroy(qi_cache);
}
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
	struct dpaa2_fd fd;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i;

	if (IS_ERR(req))
		return PTR_ERR(req);

	if (priv->cscn_mem) {
		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
					DPAA2_CSCN_SIZE,
					DMA_FROM_DEVICE);
		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
			dev_dbg_ratelimited(dev, "Dropping request\n");
			return -EBUSY;
		}
	}

	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);

	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, req->fd_flt_dma)) {
		dev_err(dev, "DMA mapping error for QI enqueue request\n");
		goto err_out;
	}

	memset(&fd, 0, sizeof(fd));
	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
	dpaa2_fd_set_flc(&fd, req->flc_dma);

	ppriv = raw_cpu_ptr(priv->ppriv);
	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
						  &fd);
		if (err != -EBUSY)
			break;

		cpu_relax();
	}

	if (unlikely(err)) {
		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
		goto err_out;
	}

	return -EINPROGRESS;

err_out:
	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	return -EIO;
}
EXPORT_SYMBOL(dpaa2_caam_enqueue);
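/*
 * Enqueue retries are bounded (2 * num_tx_queues attempts, with cpu_relax()
 * between them) rather than infinite, so a persistently busy portal surfaces
 * as an -EIO on the request instead of a stuck CPU.
 */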
static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpseci",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);

static struct fsl_mc_driver dpaa2_caam_driver = {
	.driver = {
		.name		= KBUILD_MODNAME,
		.owner		= THIS_MODULE,
	},
	.probe		= dpaa2_caam_probe,
	.remove		= dpaa2_caam_remove,
	.match_id_table = dpaa2_caam_match_id_table
};

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);