treewide: remove redundant IS_ERR() before error code check
drivers/crypto/caam/caamalg_qi2.c
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3 * Copyright 2015-2016 Freescale Semiconductor Inc.
4 * Copyright 2017-2019 NXP
5 */
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include "dpseci-debugfs.h"
19 #include <linux/fsl/mc.h>
20 #include <soc/fsl/dpaa2-io.h>
21 #include <soc/fsl/dpaa2-fd.h>
23 #define CAAM_CRA_PRIORITY 2000
25 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
26 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
27 SHA512_DIGEST_SIZE * 2)
29 /*
30 * This is a cache of buffers, from which the users of CAAM QI driver
31 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
32 * NOTE: A more elegant solution would be to have some headroom in the frames
33 * being processed. This can be added by the dpaa2-eth driver. This would
34 * pose a problem for userspace application processing which cannot
35 * know of this limitation. So for now, this will work.
36 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
37 */
38 static struct kmem_cache *qi_cache;
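/*
 * Illustrative sketch only (the creation happens at probe time, outside
 * the lines shown here; cache name and flags are assumptions): the cache
 * is set up once with something like
 *
 *	qi_cache = kmem_cache_create("dpaa2_caamqicache",
 *				     CAAM_QI_MEMCACHE_SIZE, 0,
 *				     SLAB_CACHE_DMA, NULL);
 *
 * after which every hotpath allocation below goes through
 * qi_cache_zalloc()/qi_cache_free() instead of kzalloc()/kfree().
 */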
40 struct caam_alg_entry {
41 struct device *dev;
42 int class1_alg_type;
43 int class2_alg_type;
44 bool rfc3686;
45 bool geniv;
46 bool nodkp;
49 struct caam_aead_alg {
50 struct aead_alg aead;
51 struct caam_alg_entry caam;
52 bool registered;
55 struct caam_skcipher_alg {
56 struct skcipher_alg skcipher;
57 struct caam_alg_entry caam;
58 bool registered;
61 /**
62 * caam_ctx - per-session context
63 * @flc: Flow Contexts array
64 * @key: [authentication key], encryption key
65 * @flc_dma: I/O virtual addresses of the Flow Contexts
66 * @key_dma: I/O virtual address of the key
67 * @dir: DMA direction for mapping key and Flow Contexts
68 * @dev: dpseci device
69 * @adata: authentication algorithm details
70 * @cdata: encryption algorithm details
71 * @authsize: authentication tag (a.k.a. ICV / MAC) size
72 */
73 struct caam_ctx {
74 struct caam_flc flc[NUM_OP];
75 u8 key[CAAM_MAX_KEY_SIZE];
76 dma_addr_t flc_dma[NUM_OP];
77 dma_addr_t key_dma;
78 enum dma_data_direction dir;
79 struct device *dev;
80 struct alginfo adata;
81 struct alginfo cdata;
82 unsigned int authsize;
85 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
86 dma_addr_t iova_addr)
88 phys_addr_t phys_addr;
90 phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
91 iova_addr;
93 return phys_to_virt(phys_addr);
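/*
 * Note: phys_to_virt() is only valid for memory in the kernel's linear
 * map, so this helper assumes the IOVA refers to a kmalloc/kmem_cache
 * buffer (such as the qi_cache allocations below), never to vmalloc or
 * userspace memory.
 */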
96 /**
97 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
98 *
99 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
100 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
101 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
102 * hosting 16 SG entries.
103 *
104 * @flags - flags that would be used for the equivalent kmalloc(..) call
105 *
106 * Returns a pointer to a retrieved buffer on success or NULL on failure.
107 */
108 static inline void *qi_cache_zalloc(gfp_t flags)
110 return kmem_cache_zalloc(qi_cache, flags);
113 /**
114 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
115 *
116 * @obj - buffer previously allocated by qi_cache_zalloc
117 *
118 * No checking is being done, the call is a passthrough call to
119 * kmem_cache_free(...)
120 */
121 static inline void qi_cache_free(void *obj)
123 kmem_cache_free(qi_cache, obj);
126 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
128 switch (crypto_tfm_alg_type(areq->tfm)) {
129 case CRYPTO_ALG_TYPE_SKCIPHER:
130 return skcipher_request_ctx(skcipher_request_cast(areq));
131 case CRYPTO_ALG_TYPE_AEAD:
132 return aead_request_ctx(container_of(areq, struct aead_request,
133 base));
134 case CRYPTO_ALG_TYPE_AHASH:
135 return ahash_request_ctx(ahash_request_cast(areq));
136 default:
137 return ERR_PTR(-EINVAL);
141 static void caam_unmap(struct device *dev, struct scatterlist *src,
142 struct scatterlist *dst, int src_nents,
143 int dst_nents, dma_addr_t iv_dma, int ivsize,
144 enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
145 int qm_sg_bytes)
147 if (dst != src) {
148 if (src_nents)
149 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
150 if (dst_nents)
151 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
152 } else {
153 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
156 if (iv_dma)
157 dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
159 if (qm_sg_bytes)
160 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
163 static int aead_set_sh_desc(struct crypto_aead *aead)
165 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
166 typeof(*alg), aead);
167 struct caam_ctx *ctx = crypto_aead_ctx(aead);
168 unsigned int ivsize = crypto_aead_ivsize(aead);
169 struct device *dev = ctx->dev;
170 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
171 struct caam_flc *flc;
172 u32 *desc;
173 u32 ctx1_iv_off = 0;
174 u32 *nonce = NULL;
175 unsigned int data_len[2];
176 u32 inl_mask;
177 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
178 OP_ALG_AAI_CTR_MOD128);
179 const bool is_rfc3686 = alg->caam.rfc3686;
181 if (!ctx->cdata.keylen || !ctx->authsize)
182 return 0;
184 /*
185 * AES-CTR needs to load IV in CONTEXT1 reg
186 * at an offset of 128bits (16bytes)
187 * CONTEXT1[255:128] = IV
188 */
189 if (ctr_mode)
190 ctx1_iv_off = 16;
192 /*
193 * RFC3686 specific:
194 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
195 */
196 if (is_rfc3686) {
197 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
198 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
199 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
202 /*
203 * In case |user key| > |derived key|, using DKP<imm,imm> would result
204 * in invalid opcodes (last bytes of user key) in the resulting
205 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
206 * addresses are needed.
207 */
208 ctx->adata.key_virt = ctx->key;
209 ctx->adata.key_dma = ctx->key_dma;
211 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
212 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
214 data_len[0] = ctx->adata.keylen_pad;
215 data_len[1] = ctx->cdata.keylen;
217 /* aead_encrypt shared descriptor */
218 if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
219 DESC_QI_AEAD_ENC_LEN) +
220 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
221 DESC_JOB_IO_LEN, data_len, &inl_mask,
222 ARRAY_SIZE(data_len)) < 0)
223 return -EINVAL;
225 ctx->adata.key_inline = !!(inl_mask & 1);
226 ctx->cdata.key_inline = !!(inl_mask & 2);
228 flc = &ctx->flc[ENCRYPT];
229 desc = flc->sh_desc;
231 if (alg->caam.geniv)
232 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
233 ivsize, ctx->authsize, is_rfc3686,
234 nonce, ctx1_iv_off, true,
235 priv->sec_attr.era);
236 else
237 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
238 ivsize, ctx->authsize, is_rfc3686, nonce,
239 ctx1_iv_off, true, priv->sec_attr.era);
241 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
242 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
243 sizeof(flc->flc) + desc_bytes(desc),
244 ctx->dir);
246 /* aead_decrypt shared descriptor */
247 if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
248 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
249 DESC_JOB_IO_LEN, data_len, &inl_mask,
250 ARRAY_SIZE(data_len)) < 0)
251 return -EINVAL;
253 ctx->adata.key_inline = !!(inl_mask & 1);
254 ctx->cdata.key_inline = !!(inl_mask & 2);
256 flc = &ctx->flc[DECRYPT];
257 desc = flc->sh_desc;
258 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
259 ivsize, ctx->authsize, alg->caam.geniv,
260 is_rfc3686, nonce, ctx1_iv_off, true,
261 priv->sec_attr.era);
262 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
263 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
264 sizeof(flc->flc) + desc_bytes(desc),
265 ctx->dir);
267 return 0;
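/*
 * A note on the inl_mask handling above: desc_inline_query() sets bit i
 * of inl_mask when data_len[i] still fits inline in the descriptor space
 * left after the fixed part. Here bit 0 maps to data_len[0] (the split
 * authentication key) and bit 1 to data_len[1] (the encryption key), so
 * e.g. inl_mask == 2 means only the encryption key is inlined while the
 * authentication key is referenced through its DMA address.
 */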
270 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
272 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
274 ctx->authsize = authsize;
275 aead_set_sh_desc(authenc);
277 return 0;
280 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
281 unsigned int keylen)
283 struct caam_ctx *ctx = crypto_aead_ctx(aead);
284 struct device *dev = ctx->dev;
285 struct crypto_authenc_keys keys;
287 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
288 goto badkey;
290 dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
291 keys.authkeylen + keys.enckeylen, keys.enckeylen,
292 keys.authkeylen);
293 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
294 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
296 ctx->adata.keylen = keys.authkeylen;
297 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
298 OP_ALG_ALGSEL_MASK);
300 if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
301 goto badkey;
303 memcpy(ctx->key, keys.authkey, keys.authkeylen);
304 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
305 dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
306 keys.enckeylen, ctx->dir);
307 print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
308 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
309 ctx->adata.keylen_pad + keys.enckeylen, 1);
311 ctx->cdata.keylen = keys.enckeylen;
313 memzero_explicit(&keys, sizeof(keys));
314 return aead_set_sh_desc(aead);
315 badkey:
316 memzero_explicit(&keys, sizeof(keys));
317 return -EINVAL;
320 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
321 unsigned int keylen)
323 struct crypto_authenc_keys keys;
324 int err;
326 err = crypto_authenc_extractkeys(&keys, key, keylen);
327 if (unlikely(err))
328 goto out;
330 err = -EINVAL;
331 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
332 goto out;
334 err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
335 aead_setkey(aead, key, keylen);
337 out:
338 memzero_explicit(&keys, sizeof(keys));
339 return err;
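/*
 * The "x ?: y" construct above is the GNU shorthand for "x ? x : y":
 * crypto_des3_ede_verify_key() returns 0 on success, so aead_setkey()
 * only runs once the 3DES key has passed the check, and any
 * verification error is returned as-is.
 */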
342 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
343 bool encrypt)
345 struct crypto_aead *aead = crypto_aead_reqtfm(req);
346 struct caam_request *req_ctx = aead_request_ctx(req);
347 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
348 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
349 struct caam_ctx *ctx = crypto_aead_ctx(aead);
350 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
351 typeof(*alg), aead);
352 struct device *dev = ctx->dev;
353 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
354 GFP_KERNEL : GFP_ATOMIC;
355 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
356 int src_len, dst_len = 0;
357 struct aead_edesc *edesc;
358 dma_addr_t qm_sg_dma, iv_dma = 0;
359 int ivsize = 0;
360 unsigned int authsize = ctx->authsize;
361 int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
362 int in_len, out_len;
363 struct dpaa2_sg_entry *sg_table;
365 /* allocate space for base edesc, link tables and IV */
366 edesc = qi_cache_zalloc(GFP_DMA | flags);
367 if (unlikely(!edesc)) {
368 dev_err(dev, "could not allocate extended descriptor\n");
369 return ERR_PTR(-ENOMEM);
372 if (unlikely(req->dst != req->src)) {
373 src_len = req->assoclen + req->cryptlen;
374 dst_len = src_len + (encrypt ? authsize : (-authsize));
376 src_nents = sg_nents_for_len(req->src, src_len);
377 if (unlikely(src_nents < 0)) {
378 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
379 src_len);
380 qi_cache_free(edesc);
381 return ERR_PTR(src_nents);
384 dst_nents = sg_nents_for_len(req->dst, dst_len);
385 if (unlikely(dst_nents < 0)) {
386 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
387 dst_len);
388 qi_cache_free(edesc);
389 return ERR_PTR(dst_nents);
392 if (src_nents) {
393 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
394 DMA_TO_DEVICE);
395 if (unlikely(!mapped_src_nents)) {
396 dev_err(dev, "unable to map source\n");
397 qi_cache_free(edesc);
398 return ERR_PTR(-ENOMEM);
400 } else {
401 mapped_src_nents = 0;
404 if (dst_nents) {
405 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
406 DMA_FROM_DEVICE);
407 if (unlikely(!mapped_dst_nents)) {
408 dev_err(dev, "unable to map destination\n");
409 dma_unmap_sg(dev, req->src, src_nents,
410 DMA_TO_DEVICE);
411 qi_cache_free(edesc);
412 return ERR_PTR(-ENOMEM);
414 } else {
415 mapped_dst_nents = 0;
417 } else {
418 src_len = req->assoclen + req->cryptlen +
419 (encrypt ? authsize : 0);
421 src_nents = sg_nents_for_len(req->src, src_len);
422 if (unlikely(src_nents < 0)) {
423 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
424 src_len);
425 qi_cache_free(edesc);
426 return ERR_PTR(src_nents);
429 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
430 DMA_BIDIRECTIONAL);
431 if (unlikely(!mapped_src_nents)) {
432 dev_err(dev, "unable to map source\n");
433 qi_cache_free(edesc);
434 return ERR_PTR(-ENOMEM);
438 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
439 ivsize = crypto_aead_ivsize(aead);
441 /*
442 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
443 * Input is not contiguous.
444 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
445 * the end of the table by allocating more S/G entries. Logic:
446 * if (src != dst && output S/G)
447 * pad output S/G, if needed
448 * else if (src == dst && S/G)
449 * overlapping S/Gs; pad one of them
450 * else if (input S/G) ...
451 * pad input S/G, if needed
452 */
453 qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
454 if (mapped_dst_nents > 1)
455 qm_sg_nents += pad_sg_nents(mapped_dst_nents);
456 else if ((req->src == req->dst) && (mapped_src_nents > 1))
457 qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
458 1 + !!ivsize +
459 pad_sg_nents(mapped_src_nents));
460 else
461 qm_sg_nents = pad_sg_nents(qm_sg_nents);
463 sg_table = &edesc->sgt[0];
464 qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
465 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
466 CAAM_QI_MEMCACHE_SIZE)) {
467 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
468 qm_sg_nents, ivsize);
469 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
470 0, DMA_NONE, 0, 0);
471 qi_cache_free(edesc);
472 return ERR_PTR(-ENOMEM);
475 if (ivsize) {
476 u8 *iv = (u8 *)(sg_table + qm_sg_nents);
478 /* Make sure IV is located in a DMAable area */
479 memcpy(iv, req->iv, ivsize);
481 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
482 if (dma_mapping_error(dev, iv_dma)) {
483 dev_err(dev, "unable to map IV\n");
484 caam_unmap(dev, req->src, req->dst, src_nents,
485 dst_nents, 0, 0, DMA_NONE, 0, 0);
486 qi_cache_free(edesc);
487 return ERR_PTR(-ENOMEM);
491 edesc->src_nents = src_nents;
492 edesc->dst_nents = dst_nents;
493 edesc->iv_dma = iv_dma;
495 if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
496 OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
497 /*
498 * The associated data comes already with the IV but we need
499 * to skip it when we authenticate or encrypt...
500 */
501 edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
502 else
503 edesc->assoclen = cpu_to_caam32(req->assoclen);
504 edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
505 DMA_TO_DEVICE);
506 if (dma_mapping_error(dev, edesc->assoclen_dma)) {
507 dev_err(dev, "unable to map assoclen\n");
508 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
509 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
510 qi_cache_free(edesc);
511 return ERR_PTR(-ENOMEM);
514 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
515 qm_sg_index++;
516 if (ivsize) {
517 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
518 qm_sg_index++;
520 sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
521 qm_sg_index += mapped_src_nents;
523 if (mapped_dst_nents > 1)
524 sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
526 qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
527 if (dma_mapping_error(dev, qm_sg_dma)) {
528 dev_err(dev, "unable to map S/G table\n");
529 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
530 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
531 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
532 qi_cache_free(edesc);
533 return ERR_PTR(-ENOMEM);
536 edesc->qm_sg_dma = qm_sg_dma;
537 edesc->qm_sg_bytes = qm_sg_bytes;
539 out_len = req->assoclen + req->cryptlen +
540 (encrypt ? ctx->authsize : (-ctx->authsize));
541 in_len = 4 + ivsize + req->assoclen + req->cryptlen;
543 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
544 dpaa2_fl_set_final(in_fle, true);
545 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
546 dpaa2_fl_set_addr(in_fle, qm_sg_dma);
547 dpaa2_fl_set_len(in_fle, in_len);
549 if (req->dst == req->src) {
550 if (mapped_src_nents == 1) {
551 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
552 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
553 } else {
554 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
555 dpaa2_fl_set_addr(out_fle, qm_sg_dma +
556 (1 + !!ivsize) * sizeof(*sg_table));
558 } else if (!mapped_dst_nents) {
559 /*
560 * crypto engine requires the output entry to be present when
561 * "frame list" FD is used.
562 * Since engine does not support FMT=2'b11 (unused entry type),
563 * leaving out_fle zeroized is the best option.
564 */
565 goto skip_out_fle;
566 } else if (mapped_dst_nents == 1) {
567 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
568 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
569 } else {
570 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
571 dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
572 sizeof(*sg_table));
575 dpaa2_fl_set_len(out_fle, out_len);
577 skip_out_fle:
578 return edesc;
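/*
 * Worked example for the qm_sg_nents padding above, assuming
 * pad_sg_nents() rounds up to a multiple of 4 (the HW read burst):
 * for src == dst with an IV and mapped_src_nents == 3,
 * qm_sg_nents = 1 + 1 + 3 = 5, then
 * max(pad_sg_nents(5), 1 + 1 + pad_sg_nents(3)) = max(8, 6) = 8
 * entries are reserved, so a 4-entry engine read can never run past
 * the end of the table.
 */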
581 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
583 struct caam_ctx *ctx = crypto_aead_ctx(aead);
584 unsigned int ivsize = crypto_aead_ivsize(aead);
585 struct device *dev = ctx->dev;
586 struct caam_flc *flc;
587 u32 *desc;
589 if (!ctx->cdata.keylen || !ctx->authsize)
590 return 0;
592 flc = &ctx->flc[ENCRYPT];
593 desc = flc->sh_desc;
594 cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
595 ctx->authsize, true, true);
596 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
597 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
598 sizeof(flc->flc) + desc_bytes(desc),
599 ctx->dir);
601 flc = &ctx->flc[DECRYPT];
602 desc = flc->sh_desc;
603 cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
604 ctx->authsize, false, true);
605 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
606 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
607 sizeof(flc->flc) + desc_bytes(desc),
608 ctx->dir);
610 return 0;
613 static int chachapoly_setauthsize(struct crypto_aead *aead,
614 unsigned int authsize)
616 struct caam_ctx *ctx = crypto_aead_ctx(aead);
618 if (authsize != POLY1305_DIGEST_SIZE)
619 return -EINVAL;
621 ctx->authsize = authsize;
622 return chachapoly_set_sh_desc(aead);
625 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
626 unsigned int keylen)
628 struct caam_ctx *ctx = crypto_aead_ctx(aead);
629 unsigned int ivsize = crypto_aead_ivsize(aead);
630 unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
632 if (keylen != CHACHA_KEY_SIZE + saltlen)
633 return -EINVAL;
635 ctx->cdata.key_virt = key;
636 ctx->cdata.keylen = keylen - saltlen;
638 return chachapoly_set_sh_desc(aead);
641 static int gcm_set_sh_desc(struct crypto_aead *aead)
643 struct caam_ctx *ctx = crypto_aead_ctx(aead);
644 struct device *dev = ctx->dev;
645 unsigned int ivsize = crypto_aead_ivsize(aead);
646 struct caam_flc *flc;
647 u32 *desc;
648 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
649 ctx->cdata.keylen;
651 if (!ctx->cdata.keylen || !ctx->authsize)
652 return 0;
654 /*
655 * AES GCM encrypt shared descriptor
656 * Job Descriptor and Shared Descriptor
657 * must fit into the 64-word Descriptor h/w Buffer
658 */
659 if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
660 ctx->cdata.key_inline = true;
661 ctx->cdata.key_virt = ctx->key;
662 } else {
663 ctx->cdata.key_inline = false;
664 ctx->cdata.key_dma = ctx->key_dma;
667 flc = &ctx->flc[ENCRYPT];
668 desc = flc->sh_desc;
669 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
670 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
671 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
672 sizeof(flc->flc) + desc_bytes(desc),
673 ctx->dir);
675 /*
676 * Job Descriptor and Shared Descriptors
677 * must all fit into the 64-word Descriptor h/w Buffer
678 */
679 if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
680 ctx->cdata.key_inline = true;
681 ctx->cdata.key_virt = ctx->key;
682 } else {
683 ctx->cdata.key_inline = false;
684 ctx->cdata.key_dma = ctx->key_dma;
687 flc = &ctx->flc[DECRYPT];
688 desc = flc->sh_desc;
689 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
690 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
691 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
692 sizeof(flc->flc) + desc_bytes(desc),
693 ctx->dir);
695 return 0;
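/*
 * Worked example for the rem_bytes test above, assuming
 * CAAM_DESC_BYTES_MAX == 64 * CAAM_CMD_SZ == 256 bytes: with a 32-byte
 * AES-256 key, rem_bytes = 256 - DESC_JOB_IO_LEN - 32, and the key is
 * placed inline in the shared descriptor only if that still leaves
 * DESC_QI_GCM_ENC_LEN (resp. DESC_QI_GCM_DEC_LEN) bytes for the
 * descriptor commands; otherwise it is referenced via key_dma.
 */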
698 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
700 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
701 int err;
703 err = crypto_gcm_check_authsize(authsize);
704 if (err)
705 return err;
707 ctx->authsize = authsize;
708 gcm_set_sh_desc(authenc);
710 return 0;
713 static int gcm_setkey(struct crypto_aead *aead,
714 const u8 *key, unsigned int keylen)
716 struct caam_ctx *ctx = crypto_aead_ctx(aead);
717 struct device *dev = ctx->dev;
718 int ret;
720 ret = aes_check_keylen(keylen);
721 if (ret)
722 return ret;
723 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
724 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
726 memcpy(ctx->key, key, keylen);
727 dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
728 ctx->cdata.keylen = keylen;
730 return gcm_set_sh_desc(aead);
733 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
735 struct caam_ctx *ctx = crypto_aead_ctx(aead);
736 struct device *dev = ctx->dev;
737 unsigned int ivsize = crypto_aead_ivsize(aead);
738 struct caam_flc *flc;
739 u32 *desc;
740 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
741 ctx->cdata.keylen;
743 if (!ctx->cdata.keylen || !ctx->authsize)
744 return 0;
746 ctx->cdata.key_virt = ctx->key;
748 /*
749 * RFC4106 encrypt shared descriptor
750 * Job Descriptor and Shared Descriptor
751 * must fit into the 64-word Descriptor h/w Buffer
752 */
753 if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
754 ctx->cdata.key_inline = true;
755 } else {
756 ctx->cdata.key_inline = false;
757 ctx->cdata.key_dma = ctx->key_dma;
760 flc = &ctx->flc[ENCRYPT];
761 desc = flc->sh_desc;
762 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
763 true);
764 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
765 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
766 sizeof(flc->flc) + desc_bytes(desc),
767 ctx->dir);
769 /*
770 * Job Descriptor and Shared Descriptors
771 * must all fit into the 64-word Descriptor h/w Buffer
772 */
773 if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
774 ctx->cdata.key_inline = true;
775 } else {
776 ctx->cdata.key_inline = false;
777 ctx->cdata.key_dma = ctx->key_dma;
780 flc = &ctx->flc[DECRYPT];
781 desc = flc->sh_desc;
782 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
783 true);
784 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
785 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
786 sizeof(flc->flc) + desc_bytes(desc),
787 ctx->dir);
789 return 0;
792 static int rfc4106_setauthsize(struct crypto_aead *authenc,
793 unsigned int authsize)
795 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
796 int err;
798 err = crypto_rfc4106_check_authsize(authsize);
799 if (err)
800 return err;
802 ctx->authsize = authsize;
803 rfc4106_set_sh_desc(authenc);
805 return 0;
808 static int rfc4106_setkey(struct crypto_aead *aead,
809 const u8 *key, unsigned int keylen)
811 struct caam_ctx *ctx = crypto_aead_ctx(aead);
812 struct device *dev = ctx->dev;
813 int ret;
815 ret = aes_check_keylen(keylen - 4);
816 if (ret)
817 return ret;
819 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
820 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
822 memcpy(ctx->key, key, keylen);
823 /*
824 * The last four bytes of the key material are used as the salt value
825 * in the nonce. Update the AES key length.
826 */
827 ctx->cdata.keylen = keylen - 4;
828 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
829 ctx->dir);
831 return rfc4106_set_sh_desc(aead);
834 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
836 struct caam_ctx *ctx = crypto_aead_ctx(aead);
837 struct device *dev = ctx->dev;
838 unsigned int ivsize = crypto_aead_ivsize(aead);
839 struct caam_flc *flc;
840 u32 *desc;
841 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
842 ctx->cdata.keylen;
844 if (!ctx->cdata.keylen || !ctx->authsize)
845 return 0;
847 ctx->cdata.key_virt = ctx->key;
849 /*
850 * RFC4543 encrypt shared descriptor
851 * Job Descriptor and Shared Descriptor
852 * must fit into the 64-word Descriptor h/w Buffer
853 */
854 if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
855 ctx->cdata.key_inline = true;
856 } else {
857 ctx->cdata.key_inline = false;
858 ctx->cdata.key_dma = ctx->key_dma;
861 flc = &ctx->flc[ENCRYPT];
862 desc = flc->sh_desc;
863 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
864 true);
865 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
866 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
867 sizeof(flc->flc) + desc_bytes(desc),
868 ctx->dir);
870 /*
871 * Job Descriptor and Shared Descriptors
872 * must all fit into the 64-word Descriptor h/w Buffer
873 */
874 if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
875 ctx->cdata.key_inline = true;
876 } else {
877 ctx->cdata.key_inline = false;
878 ctx->cdata.key_dma = ctx->key_dma;
881 flc = &ctx->flc[DECRYPT];
882 desc = flc->sh_desc;
883 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
884 true);
885 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
886 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
887 sizeof(flc->flc) + desc_bytes(desc),
888 ctx->dir);
890 return 0;
893 static int rfc4543_setauthsize(struct crypto_aead *authenc,
894 unsigned int authsize)
896 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
898 if (authsize != 16)
899 return -EINVAL;
901 ctx->authsize = authsize;
902 rfc4543_set_sh_desc(authenc);
904 return 0;
907 static int rfc4543_setkey(struct crypto_aead *aead,
908 const u8 *key, unsigned int keylen)
910 struct caam_ctx *ctx = crypto_aead_ctx(aead);
911 struct device *dev = ctx->dev;
912 int ret;
914 ret = aes_check_keylen(keylen - 4);
915 if (ret)
916 return ret;
918 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
919 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
921 memcpy(ctx->key, key, keylen);
922 /*
923 * The last four bytes of the key material are used as the salt value
924 * in the nonce. Update the AES key length.
925 */
926 ctx->cdata.keylen = keylen - 4;
927 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
928 ctx->dir);
930 return rfc4543_set_sh_desc(aead);
933 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
934 unsigned int keylen, const u32 ctx1_iv_off)
936 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
937 struct caam_skcipher_alg *alg =
938 container_of(crypto_skcipher_alg(skcipher),
939 struct caam_skcipher_alg, skcipher);
940 struct device *dev = ctx->dev;
941 struct caam_flc *flc;
942 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
943 u32 *desc;
944 const bool is_rfc3686 = alg->caam.rfc3686;
946 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
947 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
949 ctx->cdata.keylen = keylen;
950 ctx->cdata.key_virt = key;
951 ctx->cdata.key_inline = true;
953 /* skcipher_encrypt shared descriptor */
954 flc = &ctx->flc[ENCRYPT];
955 desc = flc->sh_desc;
956 cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
957 ctx1_iv_off);
958 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
959 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
960 sizeof(flc->flc) + desc_bytes(desc),
961 ctx->dir);
963 /* skcipher_decrypt shared descriptor */
964 flc = &ctx->flc[DECRYPT];
965 desc = flc->sh_desc;
966 cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
967 ctx1_iv_off);
968 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
969 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
970 sizeof(flc->flc) + desc_bytes(desc),
971 ctx->dir);
973 return 0;
976 static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
977 const u8 *key, unsigned int keylen)
979 int err;
981 err = aes_check_keylen(keylen);
982 if (err)
983 return err;
985 return skcipher_setkey(skcipher, key, keylen, 0);
988 static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
989 const u8 *key, unsigned int keylen)
991 u32 ctx1_iv_off;
992 int err;
994 /*
995 * RFC3686 specific:
996 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
997 * | *key = {KEY, NONCE}
998 */
999 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1000 keylen -= CTR_RFC3686_NONCE_SIZE;
1002 err = aes_check_keylen(keylen);
1003 if (err)
1004 return err;
1006 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1009 static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
1010 const u8 *key, unsigned int keylen)
1012 u32 ctx1_iv_off;
1013 int err;
1015 /*
1016 * AES-CTR needs to load IV in CONTEXT1 reg
1017 * at an offset of 128bits (16bytes)
1018 * CONTEXT1[255:128] = IV
1019 */
1020 ctx1_iv_off = 16;
1022 err = aes_check_keylen(keylen);
1023 if (err)
1024 return err;
1026 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1029 static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
1030 const u8 *key, unsigned int keylen)
1032 if (keylen != CHACHA_KEY_SIZE)
1033 return -EINVAL;
1035 return skcipher_setkey(skcipher, key, keylen, 0);
1038 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
1039 const u8 *key, unsigned int keylen)
1041 return verify_skcipher_des_key(skcipher, key) ?:
1042 skcipher_setkey(skcipher, key, keylen, 0);
1045 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1046 const u8 *key, unsigned int keylen)
1048 return verify_skcipher_des3_key(skcipher, key) ?:
1049 skcipher_setkey(skcipher, key, keylen, 0);
1052 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
1053 unsigned int keylen)
1055 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1056 struct device *dev = ctx->dev;
1057 struct caam_flc *flc;
1058 u32 *desc;
1060 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
1061 dev_err(dev, "key size mismatch\n");
1062 return -EINVAL;
1065 ctx->cdata.keylen = keylen;
1066 ctx->cdata.key_virt = key;
1067 ctx->cdata.key_inline = true;
1069 /* xts_skcipher_encrypt shared descriptor */
1070 flc = &ctx->flc[ENCRYPT];
1071 desc = flc->sh_desc;
1072 cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
1073 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1074 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1075 sizeof(flc->flc) + desc_bytes(desc),
1076 ctx->dir);
1078 /* xts_skcipher_decrypt shared descriptor */
1079 flc = &ctx->flc[DECRYPT];
1080 desc = flc->sh_desc;
1081 cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
1082 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1083 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1084 sizeof(flc->flc) + desc_bytes(desc),
1085 ctx->dir);
1087 return 0;
1090 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1092 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1093 struct caam_request *req_ctx = skcipher_request_ctx(req);
1094 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1095 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1096 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1097 struct device *dev = ctx->dev;
1098 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1099 GFP_KERNEL : GFP_ATOMIC;
1100 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1101 struct skcipher_edesc *edesc;
1102 dma_addr_t iv_dma;
1103 u8 *iv;
1104 int ivsize = crypto_skcipher_ivsize(skcipher);
1105 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1106 struct dpaa2_sg_entry *sg_table;
1108 src_nents = sg_nents_for_len(req->src, req->cryptlen);
1109 if (unlikely(src_nents < 0)) {
1110 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1111 req->cryptlen);
1112 return ERR_PTR(src_nents);
1115 if (unlikely(req->dst != req->src)) {
1116 dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1117 if (unlikely(dst_nents < 0)) {
1118 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1119 req->cryptlen);
1120 return ERR_PTR(dst_nents);
1123 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1124 DMA_TO_DEVICE);
1125 if (unlikely(!mapped_src_nents)) {
1126 dev_err(dev, "unable to map source\n");
1127 return ERR_PTR(-ENOMEM);
1130 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1131 DMA_FROM_DEVICE);
1132 if (unlikely(!mapped_dst_nents)) {
1133 dev_err(dev, "unable to map destination\n");
1134 dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1135 return ERR_PTR(-ENOMEM);
1137 } else {
1138 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1139 DMA_BIDIRECTIONAL);
1140 if (unlikely(!mapped_src_nents)) {
1141 dev_err(dev, "unable to map source\n");
1142 return ERR_PTR(-ENOMEM);
1146 qm_sg_ents = 1 + mapped_src_nents;
1147 dst_sg_idx = qm_sg_ents;
1149 /*
1150 * Input, output HW S/G tables: [IV, src][dst, IV]
1151 * IV entries point to the same buffer
1152 * If src == dst, S/G entries are reused (S/G tables overlap)
1153 *
1154 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1155 * the end of the table by allocating more S/G entries.
1156 */
1157 if (req->src != req->dst)
1158 qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1159 else
1160 qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
1162 qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1163 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1164 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1165 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1166 qm_sg_ents, ivsize);
1167 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1168 0, DMA_NONE, 0, 0);
1169 return ERR_PTR(-ENOMEM);
1172 /* allocate space for base edesc, link tables and IV */
1173 edesc = qi_cache_zalloc(GFP_DMA | flags);
1174 if (unlikely(!edesc)) {
1175 dev_err(dev, "could not allocate extended descriptor\n");
1176 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1177 0, DMA_NONE, 0, 0);
1178 return ERR_PTR(-ENOMEM);
1181 /* Make sure IV is located in a DMAable area */
1182 sg_table = &edesc->sgt[0];
1183 iv = (u8 *)(sg_table + qm_sg_ents);
1184 memcpy(iv, req->iv, ivsize);
1186 iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
1187 if (dma_mapping_error(dev, iv_dma)) {
1188 dev_err(dev, "unable to map IV\n");
1189 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1190 0, DMA_NONE, 0, 0);
1191 qi_cache_free(edesc);
1192 return ERR_PTR(-ENOMEM);
1195 edesc->src_nents = src_nents;
1196 edesc->dst_nents = dst_nents;
1197 edesc->iv_dma = iv_dma;
1198 edesc->qm_sg_bytes = qm_sg_bytes;
1200 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1201 sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1203 if (req->src != req->dst)
1204 sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1206 dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1207 ivsize, 0);
1209 edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1210 DMA_TO_DEVICE);
1211 if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1212 dev_err(dev, "unable to map S/G table\n");
1213 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1214 iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1215 qi_cache_free(edesc);
1216 return ERR_PTR(-ENOMEM);
1219 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1220 dpaa2_fl_set_final(in_fle, true);
1221 dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1222 dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
1224 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1225 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1227 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1229 if (req->src == req->dst)
1230 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1231 sizeof(*sg_table));
1232 else
1233 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1234 sizeof(*sg_table));
1236 return edesc;
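/*
 * Resulting table layout (sketch): entry 0 is the input IV, entries
 * 1..mapped_src_nents cover src; the output view starts at dst_sg_idx
 * (overlapping at entry 1 when src == dst) and ends with a second IV
 * entry at dst_sg_idx + mapped_dst_nents. That trailing IV buffer is
 * mapped DMA_BIDIRECTIONAL so the engine can deposit the last
 * ciphertext block / counter there for the completion callbacks below
 * to copy back into req->iv.
 */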
1239 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1240 struct aead_request *req)
1242 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1243 int ivsize = crypto_aead_ivsize(aead);
1245 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1246 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1247 edesc->qm_sg_bytes);
1248 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1251 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1252 struct skcipher_request *req)
1254 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1255 int ivsize = crypto_skcipher_ivsize(skcipher);
1257 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1258 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1259 edesc->qm_sg_bytes);
1262 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1264 struct crypto_async_request *areq = cbk_ctx;
1265 struct aead_request *req = container_of(areq, struct aead_request,
1266 base);
1267 struct caam_request *req_ctx = to_caam_req(areq);
1268 struct aead_edesc *edesc = req_ctx->edesc;
1269 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1270 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1271 int ecode = 0;
1273 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1275 if (unlikely(status))
1276 ecode = caam_qi2_strstatus(ctx->dev, status);
1278 aead_unmap(ctx->dev, edesc, req);
1279 qi_cache_free(edesc);
1280 aead_request_complete(req, ecode);
1283 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1285 struct crypto_async_request *areq = cbk_ctx;
1286 struct aead_request *req = container_of(areq, struct aead_request,
1287 base);
1288 struct caam_request *req_ctx = to_caam_req(areq);
1289 struct aead_edesc *edesc = req_ctx->edesc;
1290 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1291 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1292 int ecode = 0;
1294 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1296 if (unlikely(status))
1297 ecode = caam_qi2_strstatus(ctx->dev, status);
1299 aead_unmap(ctx->dev, edesc, req);
1300 qi_cache_free(edesc);
1301 aead_request_complete(req, ecode);
1304 static int aead_encrypt(struct aead_request *req)
1306 struct aead_edesc *edesc;
1307 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1308 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1309 struct caam_request *caam_req = aead_request_ctx(req);
1310 int ret;
1312 /* allocate extended descriptor */
1313 edesc = aead_edesc_alloc(req, true);
1314 if (IS_ERR(edesc))
1315 return PTR_ERR(edesc);
1317 caam_req->flc = &ctx->flc[ENCRYPT];
1318 caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1319 caam_req->cbk = aead_encrypt_done;
1320 caam_req->ctx = &req->base;
1321 caam_req->edesc = edesc;
1322 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1323 if (ret != -EINPROGRESS &&
1324 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1325 aead_unmap(ctx->dev, edesc, req);
1326 qi_cache_free(edesc);
1329 return ret;
1332 static int aead_decrypt(struct aead_request *req)
1334 struct aead_edesc *edesc;
1335 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1336 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1337 struct caam_request *caam_req = aead_request_ctx(req);
1338 int ret;
1340 /* allocate extended descriptor */
1341 edesc = aead_edesc_alloc(req, false);
1342 if (IS_ERR(edesc))
1343 return PTR_ERR(edesc);
1345 caam_req->flc = &ctx->flc[DECRYPT];
1346 caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1347 caam_req->cbk = aead_decrypt_done;
1348 caam_req->ctx = &req->base;
1349 caam_req->edesc = edesc;
1350 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1351 if (ret != -EINPROGRESS &&
1352 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1353 aead_unmap(ctx->dev, edesc, req);
1354 qi_cache_free(edesc);
1357 return ret;
1360 static int ipsec_gcm_encrypt(struct aead_request *req)
1362 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1365 static int ipsec_gcm_decrypt(struct aead_request *req)
1367 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
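/*
 * Usage sketch (generic crypto API, not specific to this driver): a
 * synchronous caller typically bridges the -EINPROGRESS/-EBUSY return
 * codes seen above with the crypto_wait_req() helper:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
 */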
1370 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1372 struct crypto_async_request *areq = cbk_ctx;
1373 struct skcipher_request *req = skcipher_request_cast(areq);
1374 struct caam_request *req_ctx = to_caam_req(areq);
1375 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1376 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1377 struct skcipher_edesc *edesc = req_ctx->edesc;
1378 int ecode = 0;
1379 int ivsize = crypto_skcipher_ivsize(skcipher);
1381 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1383 if (unlikely(status))
1384 ecode = caam_qi2_strstatus(ctx->dev, status);
1386 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1387 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1388 edesc->src_nents > 1 ? 100 : ivsize, 1);
1389 caam_dump_sg("dst @" __stringify(__LINE__)": ",
1390 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1391 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1393 skcipher_unmap(ctx->dev, edesc, req);
1395 /*
1396 * The crypto API expects us to set the IV (req->iv) to the last
1397 * ciphertext block (CBC mode) or last counter (CTR mode).
1398 * This is used e.g. by the CTS mode.
1399 */
1400 if (!ecode)
1401 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1402 ivsize);
1404 qi_cache_free(edesc);
1405 skcipher_request_complete(req, ecode);
1408 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1410 struct crypto_async_request *areq = cbk_ctx;
1411 struct skcipher_request *req = skcipher_request_cast(areq);
1412 struct caam_request *req_ctx = to_caam_req(areq);
1413 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1414 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1415 struct skcipher_edesc *edesc = req_ctx->edesc;
1416 int ecode = 0;
1417 int ivsize = crypto_skcipher_ivsize(skcipher);
1419 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1421 if (unlikely(status))
1422 ecode = caam_qi2_strstatus(ctx->dev, status);
1424 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1425 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1426 edesc->src_nents > 1 ? 100 : ivsize, 1);
1427 caam_dump_sg("dst @" __stringify(__LINE__)": ",
1428 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1429 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1431 skcipher_unmap(ctx->dev, edesc, req);
1433 /*
1434 * The crypto API expects us to set the IV (req->iv) to the last
1435 * ciphertext block (CBC mode) or last counter (CTR mode).
1436 * This is used e.g. by the CTS mode.
1437 */
1438 if (!ecode)
1439 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1440 ivsize);
1442 qi_cache_free(edesc);
1443 skcipher_request_complete(req, ecode);
1446 static int skcipher_encrypt(struct skcipher_request *req)
1448 struct skcipher_edesc *edesc;
1449 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1450 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1451 struct caam_request *caam_req = skcipher_request_ctx(req);
1452 int ret;
1454 if (!req->cryptlen)
1455 return 0;
1457 /* allocate extended descriptor */
1458 edesc = skcipher_edesc_alloc(req);
1459 if (IS_ERR(edesc))
1460 return PTR_ERR(edesc);
1462 caam_req->flc = &ctx->flc[ENCRYPT];
1463 caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1464 caam_req->cbk = skcipher_encrypt_done;
1465 caam_req->ctx = &req->base;
1466 caam_req->edesc = edesc;
1467 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1468 if (ret != -EINPROGRESS &&
1469 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1470 skcipher_unmap(ctx->dev, edesc, req);
1471 qi_cache_free(edesc);
1474 return ret;
1477 static int skcipher_decrypt(struct skcipher_request *req)
1479 struct skcipher_edesc *edesc;
1480 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1481 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1482 struct caam_request *caam_req = skcipher_request_ctx(req);
1483 int ret;
1485 if (!req->cryptlen)
1486 return 0;
1487 /* allocate extended descriptor */
1488 edesc = skcipher_edesc_alloc(req);
1489 if (IS_ERR(edesc))
1490 return PTR_ERR(edesc);
1492 caam_req->flc = &ctx->flc[DECRYPT];
1493 caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1494 caam_req->cbk = skcipher_decrypt_done;
1495 caam_req->ctx = &req->base;
1496 caam_req->edesc = edesc;
1497 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1498 if (ret != -EINPROGRESS &&
1499 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1500 skcipher_unmap(ctx->dev, edesc, req);
1501 qi_cache_free(edesc);
1504 return ret;
1507 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1508 bool uses_dkp)
1510 dma_addr_t dma_addr;
1511 int i;
1513 /* copy descriptor header template value */
1514 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1515 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1517 ctx->dev = caam->dev;
1518 ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1520 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1521 offsetof(struct caam_ctx, flc_dma),
1522 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1523 if (dma_mapping_error(ctx->dev, dma_addr)) {
1524 dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1525 return -ENOMEM;
1528 for (i = 0; i < NUM_OP; i++)
1529 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1530 ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1532 return 0;
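/*
 * Layout covered by the single dma_map_single_attrs() call above: the
 * flc[NUM_OP] array and the key[] buffer sit first in struct caam_ctx,
 * so one mapping of offsetof(struct caam_ctx, flc_dma) bytes spans
 * both; flc_dma[i] and key_dma are then derived by offsetting into it.
 */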
1535 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1537 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1538 struct caam_skcipher_alg *caam_alg =
1539 container_of(alg, typeof(*caam_alg), skcipher);
1541 crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1542 return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
1545 static int caam_cra_init_aead(struct crypto_aead *tfm)
1547 struct aead_alg *alg = crypto_aead_alg(tfm);
1548 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1549 aead);
1551 crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1552 return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1553 !caam_alg->caam.nodkp);
1556 static void caam_exit_common(struct caam_ctx *ctx)
1558 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1559 offsetof(struct caam_ctx, flc_dma), ctx->dir,
1560 DMA_ATTR_SKIP_CPU_SYNC);
1563 static void caam_cra_exit(struct crypto_skcipher *tfm)
1565 caam_exit_common(crypto_skcipher_ctx(tfm));
1568 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1570 caam_exit_common(crypto_aead_ctx(tfm));
1573 static struct caam_skcipher_alg driver_algs[] = {
1575 .skcipher = {
1576 .base = {
1577 .cra_name = "cbc(aes)",
1578 .cra_driver_name = "cbc-aes-caam-qi2",
1579 .cra_blocksize = AES_BLOCK_SIZE,
1581 .setkey = aes_skcipher_setkey,
1582 .encrypt = skcipher_encrypt,
1583 .decrypt = skcipher_decrypt,
1584 .min_keysize = AES_MIN_KEY_SIZE,
1585 .max_keysize = AES_MAX_KEY_SIZE,
1586 .ivsize = AES_BLOCK_SIZE,
1588 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1591 .skcipher = {
1592 .base = {
1593 .cra_name = "cbc(des3_ede)",
1594 .cra_driver_name = "cbc-3des-caam-qi2",
1595 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1597 .setkey = des3_skcipher_setkey,
1598 .encrypt = skcipher_encrypt,
1599 .decrypt = skcipher_decrypt,
1600 .min_keysize = DES3_EDE_KEY_SIZE,
1601 .max_keysize = DES3_EDE_KEY_SIZE,
1602 .ivsize = DES3_EDE_BLOCK_SIZE,
1604 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1607 .skcipher = {
1608 .base = {
1609 .cra_name = "cbc(des)",
1610 .cra_driver_name = "cbc-des-caam-qi2",
1611 .cra_blocksize = DES_BLOCK_SIZE,
1613 .setkey = des_skcipher_setkey,
1614 .encrypt = skcipher_encrypt,
1615 .decrypt = skcipher_decrypt,
1616 .min_keysize = DES_KEY_SIZE,
1617 .max_keysize = DES_KEY_SIZE,
1618 .ivsize = DES_BLOCK_SIZE,
1620 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1623 .skcipher = {
1624 .base = {
1625 .cra_name = "ctr(aes)",
1626 .cra_driver_name = "ctr-aes-caam-qi2",
1627 .cra_blocksize = 1,
1629 .setkey = ctr_skcipher_setkey,
1630 .encrypt = skcipher_encrypt,
1631 .decrypt = skcipher_decrypt,
1632 .min_keysize = AES_MIN_KEY_SIZE,
1633 .max_keysize = AES_MAX_KEY_SIZE,
1634 .ivsize = AES_BLOCK_SIZE,
1635 .chunksize = AES_BLOCK_SIZE,
1637 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1638 OP_ALG_AAI_CTR_MOD128,
1641 .skcipher = {
1642 .base = {
1643 .cra_name = "rfc3686(ctr(aes))",
1644 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1645 .cra_blocksize = 1,
1647 .setkey = rfc3686_skcipher_setkey,
1648 .encrypt = skcipher_encrypt,
1649 .decrypt = skcipher_decrypt,
1650 .min_keysize = AES_MIN_KEY_SIZE +
1651 CTR_RFC3686_NONCE_SIZE,
1652 .max_keysize = AES_MAX_KEY_SIZE +
1653 CTR_RFC3686_NONCE_SIZE,
1654 .ivsize = CTR_RFC3686_IV_SIZE,
1655 .chunksize = AES_BLOCK_SIZE,
1657 .caam = {
1658 .class1_alg_type = OP_ALG_ALGSEL_AES |
1659 OP_ALG_AAI_CTR_MOD128,
1660 .rfc3686 = true,
1664 .skcipher = {
1665 .base = {
1666 .cra_name = "xts(aes)",
1667 .cra_driver_name = "xts-aes-caam-qi2",
1668 .cra_blocksize = AES_BLOCK_SIZE,
1670 .setkey = xts_skcipher_setkey,
1671 .encrypt = skcipher_encrypt,
1672 .decrypt = skcipher_decrypt,
1673 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1674 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1675 .ivsize = AES_BLOCK_SIZE,
1677 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1680 .skcipher = {
1681 .base = {
1682 .cra_name = "chacha20",
1683 .cra_driver_name = "chacha20-caam-qi2",
1684 .cra_blocksize = 1,
1686 .setkey = chacha20_skcipher_setkey,
1687 .encrypt = skcipher_encrypt,
1688 .decrypt = skcipher_decrypt,
1689 .min_keysize = CHACHA_KEY_SIZE,
1690 .max_keysize = CHACHA_KEY_SIZE,
1691 .ivsize = CHACHA_IV_SIZE,
1693 .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1697 static struct caam_aead_alg driver_aeads[] = {
1699 .aead = {
1700 .base = {
1701 .cra_name = "rfc4106(gcm(aes))",
1702 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1703 .cra_blocksize = 1,
1705 .setkey = rfc4106_setkey,
1706 .setauthsize = rfc4106_setauthsize,
1707 .encrypt = ipsec_gcm_encrypt,
1708 .decrypt = ipsec_gcm_decrypt,
1709 .ivsize = 8,
1710 .maxauthsize = AES_BLOCK_SIZE,
1712 .caam = {
1713 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1714 .nodkp = true,
1718 .aead = {
1719 .base = {
1720 .cra_name = "rfc4543(gcm(aes))",
1721 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1722 .cra_blocksize = 1,
1724 .setkey = rfc4543_setkey,
1725 .setauthsize = rfc4543_setauthsize,
1726 .encrypt = ipsec_gcm_encrypt,
1727 .decrypt = ipsec_gcm_decrypt,
1728 .ivsize = 8,
1729 .maxauthsize = AES_BLOCK_SIZE,
1731 .caam = {
1732 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1733 .nodkp = true,
1736 /* Galois Counter Mode */
1738 .aead = {
1739 .base = {
1740 .cra_name = "gcm(aes)",
1741 .cra_driver_name = "gcm-aes-caam-qi2",
1742 .cra_blocksize = 1,
1744 .setkey = gcm_setkey,
1745 .setauthsize = gcm_setauthsize,
1746 .encrypt = aead_encrypt,
1747 .decrypt = aead_decrypt,
1748 .ivsize = 12,
1749 .maxauthsize = AES_BLOCK_SIZE,
1751 .caam = {
1752 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1753 .nodkp = true,
1756 /* single-pass ipsec_esp descriptor */
1758 .aead = {
1759 .base = {
1760 .cra_name = "authenc(hmac(md5),cbc(aes))",
1761 .cra_driver_name = "authenc-hmac-md5-"
1762 "cbc-aes-caam-qi2",
1763 .cra_blocksize = AES_BLOCK_SIZE,
1765 .setkey = aead_setkey,
1766 .setauthsize = aead_setauthsize,
1767 .encrypt = aead_encrypt,
1768 .decrypt = aead_decrypt,
1769 .ivsize = AES_BLOCK_SIZE,
1770 .maxauthsize = MD5_DIGEST_SIZE,
1772 .caam = {
1773 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1774 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1775 OP_ALG_AAI_HMAC_PRECOMP,
1779 .aead = {
1780 .base = {
1781 .cra_name = "echainiv(authenc(hmac(md5),"
1782 "cbc(aes)))",
1783 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1784 "cbc-aes-caam-qi2",
1785 .cra_blocksize = AES_BLOCK_SIZE,
1787 .setkey = aead_setkey,
1788 .setauthsize = aead_setauthsize,
1789 .encrypt = aead_encrypt,
1790 .decrypt = aead_decrypt,
1791 .ivsize = AES_BLOCK_SIZE,
1792 .maxauthsize = MD5_DIGEST_SIZE,
1794 .caam = {
1795 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1796 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1797 OP_ALG_AAI_HMAC_PRECOMP,
1798 .geniv = true,
1802 .aead = {
1803 .base = {
1804 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1805 .cra_driver_name = "authenc-hmac-sha1-"
1806 "cbc-aes-caam-qi2",
1807 .cra_blocksize = AES_BLOCK_SIZE,
1809 .setkey = aead_setkey,
1810 .setauthsize = aead_setauthsize,
1811 .encrypt = aead_encrypt,
1812 .decrypt = aead_decrypt,
1813 .ivsize = AES_BLOCK_SIZE,
1814 .maxauthsize = SHA1_DIGEST_SIZE,
1816 .caam = {
1817 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1818 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1819 OP_ALG_AAI_HMAC_PRECOMP,
1823 .aead = {
1824 .base = {
1825 .cra_name = "echainiv(authenc(hmac(sha1),"
1826 "cbc(aes)))",
1827 .cra_driver_name = "echainiv-authenc-"
1828 "hmac-sha1-cbc-aes-caam-qi2",
1829 .cra_blocksize = AES_BLOCK_SIZE,
1831 .setkey = aead_setkey,
1832 .setauthsize = aead_setauthsize,
1833 .encrypt = aead_encrypt,
1834 .decrypt = aead_decrypt,
1835 .ivsize = AES_BLOCK_SIZE,
1836 .maxauthsize = SHA1_DIGEST_SIZE,
1838 .caam = {
1839 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1840 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1841 OP_ALG_AAI_HMAC_PRECOMP,
1842 .geniv = true,
1846 .aead = {
1847 .base = {
1848 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1849 .cra_driver_name = "authenc-hmac-sha224-"
1850 "cbc-aes-caam-qi2",
1851 .cra_blocksize = AES_BLOCK_SIZE,
1853 .setkey = aead_setkey,
1854 .setauthsize = aead_setauthsize,
1855 .encrypt = aead_encrypt,
1856 .decrypt = aead_decrypt,
1857 .ivsize = AES_BLOCK_SIZE,
1858 .maxauthsize = SHA224_DIGEST_SIZE,
1860 .caam = {
1861 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1862 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1863 OP_ALG_AAI_HMAC_PRECOMP,
1867 .aead = {
1868 .base = {
1869 .cra_name = "echainiv(authenc(hmac(sha224),"
1870 "cbc(aes)))",
1871 .cra_driver_name = "echainiv-authenc-"
1872 "hmac-sha224-cbc-aes-caam-qi2",
1873 .cra_blocksize = AES_BLOCK_SIZE,
1875 .setkey = aead_setkey,
1876 .setauthsize = aead_setauthsize,
1877 .encrypt = aead_encrypt,
1878 .decrypt = aead_decrypt,
1879 .ivsize = AES_BLOCK_SIZE,
1880 .maxauthsize = SHA224_DIGEST_SIZE,
1882 .caam = {
1883 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1884 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1885 OP_ALG_AAI_HMAC_PRECOMP,
1886 .geniv = true,
1890 .aead = {
1891 .base = {
1892 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1893 .cra_driver_name = "authenc-hmac-sha256-"
1894 "cbc-aes-caam-qi2",
1895 .cra_blocksize = AES_BLOCK_SIZE,
1897 .setkey = aead_setkey,
1898 .setauthsize = aead_setauthsize,
1899 .encrypt = aead_encrypt,
1900 .decrypt = aead_decrypt,
1901 .ivsize = AES_BLOCK_SIZE,
1902 .maxauthsize = SHA256_DIGEST_SIZE,
1904 .caam = {
1905 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1906 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1907 OP_ALG_AAI_HMAC_PRECOMP,
1911 .aead = {
1912 .base = {
1913 .cra_name = "echainiv(authenc(hmac(sha256),"
1914 "cbc(aes)))",
1915 .cra_driver_name = "echainiv-authenc-"
1916 "hmac-sha256-cbc-aes-"
1917 "caam-qi2",
1918 .cra_blocksize = AES_BLOCK_SIZE,
1920 .setkey = aead_setkey,
1921 .setauthsize = aead_setauthsize,
1922 .encrypt = aead_encrypt,
1923 .decrypt = aead_decrypt,
1924 .ivsize = AES_BLOCK_SIZE,
1925 .maxauthsize = SHA256_DIGEST_SIZE,
1927 .caam = {
1928 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1929 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1930 OP_ALG_AAI_HMAC_PRECOMP,
1931 .geniv = true,
1935 .aead = {
1936 .base = {
1937 .cra_name = "authenc(hmac(sha384),cbc(aes))",
1938 .cra_driver_name = "authenc-hmac-sha384-"
1939 "cbc-aes-caam-qi2",
1940 .cra_blocksize = AES_BLOCK_SIZE,
1942 .setkey = aead_setkey,
1943 .setauthsize = aead_setauthsize,
1944 .encrypt = aead_encrypt,
1945 .decrypt = aead_decrypt,
1946 .ivsize = AES_BLOCK_SIZE,
1947 .maxauthsize = SHA384_DIGEST_SIZE,
1949 .caam = {
1950 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1951 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1952 OP_ALG_AAI_HMAC_PRECOMP,
1956 .aead = {
1957 .base = {
1958 .cra_name = "echainiv(authenc(hmac(sha384),"
1959 "cbc(aes)))",
1960 .cra_driver_name = "echainiv-authenc-"
1961 "hmac-sha384-cbc-aes-"
1962 "caam-qi2",
1963 .cra_blocksize = AES_BLOCK_SIZE,
1965 .setkey = aead_setkey,
1966 .setauthsize = aead_setauthsize,
1967 .encrypt = aead_encrypt,
1968 .decrypt = aead_decrypt,
1969 .ivsize = AES_BLOCK_SIZE,
1970 .maxauthsize = SHA384_DIGEST_SIZE,
1972 .caam = {
1973 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1974 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1975 OP_ALG_AAI_HMAC_PRECOMP,
1976 .geniv = true,
1980 .aead = {
1981 .base = {
1982 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1983 .cra_driver_name = "authenc-hmac-sha512-"
1984 "cbc-aes-caam-qi2",
1985 .cra_blocksize = AES_BLOCK_SIZE,
1987 .setkey = aead_setkey,
1988 .setauthsize = aead_setauthsize,
1989 .encrypt = aead_encrypt,
1990 .decrypt = aead_decrypt,
1991 .ivsize = AES_BLOCK_SIZE,
1992 .maxauthsize = SHA512_DIGEST_SIZE,
1994 .caam = {
1995 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1996 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1997 OP_ALG_AAI_HMAC_PRECOMP,
2001 .aead = {
2002 .base = {
2003 .cra_name = "echainiv(authenc(hmac(sha512),"
2004 "cbc(aes)))",
2005 .cra_driver_name = "echainiv-authenc-"
2006 "hmac-sha512-cbc-aes-"
2007 "caam-qi2",
2008 .cra_blocksize = AES_BLOCK_SIZE,
2010 .setkey = aead_setkey,
2011 .setauthsize = aead_setauthsize,
2012 .encrypt = aead_encrypt,
2013 .decrypt = aead_decrypt,
2014 .ivsize = AES_BLOCK_SIZE,
2015 .maxauthsize = SHA512_DIGEST_SIZE,
2017 .caam = {
2018 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2019 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2020 OP_ALG_AAI_HMAC_PRECOMP,
2021 .geniv = true,
2025 .aead = {
2026 .base = {
2027 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2028 .cra_driver_name = "authenc-hmac-md5-"
2029 "cbc-des3_ede-caam-qi2",
2030 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2032 .setkey = des3_aead_setkey,
2033 .setauthsize = aead_setauthsize,
2034 .encrypt = aead_encrypt,
2035 .decrypt = aead_decrypt,
2036 .ivsize = DES3_EDE_BLOCK_SIZE,
2037 .maxauthsize = MD5_DIGEST_SIZE,
2039 .caam = {
2040 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2041 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2042 OP_ALG_AAI_HMAC_PRECOMP,
2046 .aead = {
2047 .base = {
2048 .cra_name = "echainiv(authenc(hmac(md5),"
2049 "cbc(des3_ede)))",
2050 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2051 "cbc-des3_ede-caam-qi2",
2052 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2054 .setkey = des3_aead_setkey,
2055 .setauthsize = aead_setauthsize,
2056 .encrypt = aead_encrypt,
2057 .decrypt = aead_decrypt,
2058 .ivsize = DES3_EDE_BLOCK_SIZE,
2059 .maxauthsize = MD5_DIGEST_SIZE,
2061 .caam = {
2062 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2063 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2064 OP_ALG_AAI_HMAC_PRECOMP,
2065 .geniv = true,
2069 .aead = {
2070 .base = {
2071 .cra_name = "authenc(hmac(sha1),"
2072 "cbc(des3_ede))",
2073 .cra_driver_name = "authenc-hmac-sha1-"
2074 "cbc-des3_ede-caam-qi2",
2075 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2077 .setkey = des3_aead_setkey,
2078 .setauthsize = aead_setauthsize,
2079 .encrypt = aead_encrypt,
2080 .decrypt = aead_decrypt,
2081 .ivsize = DES3_EDE_BLOCK_SIZE,
2082 .maxauthsize = SHA1_DIGEST_SIZE,
2084 .caam = {
2085 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2086 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2087 OP_ALG_AAI_HMAC_PRECOMP,
2091 .aead = {
2092 .base = {
2093 .cra_name = "echainiv(authenc(hmac(sha1),"
2094 "cbc(des3_ede)))",
2095 .cra_driver_name = "echainiv-authenc-"
2096 "hmac-sha1-"
2097 "cbc-des3_ede-caam-qi2",
2098 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2100 .setkey = des3_aead_setkey,
2101 .setauthsize = aead_setauthsize,
2102 .encrypt = aead_encrypt,
2103 .decrypt = aead_decrypt,
2104 .ivsize = DES3_EDE_BLOCK_SIZE,
2105 .maxauthsize = SHA1_DIGEST_SIZE,
2107 .caam = {
2108 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2109 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2110 OP_ALG_AAI_HMAC_PRECOMP,
2111 .geniv = true,
2115 .aead = {
2116 .base = {
2117 .cra_name = "authenc(hmac(sha224),"
2118 "cbc(des3_ede))",
2119 .cra_driver_name = "authenc-hmac-sha224-"
2120 "cbc-des3_ede-caam-qi2",
2121 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2123 .setkey = des3_aead_setkey,
2124 .setauthsize = aead_setauthsize,
2125 .encrypt = aead_encrypt,
2126 .decrypt = aead_decrypt,
2127 .ivsize = DES3_EDE_BLOCK_SIZE,
2128 .maxauthsize = SHA224_DIGEST_SIZE,
2130 .caam = {
2131 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2132 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2133 OP_ALG_AAI_HMAC_PRECOMP,
2137 .aead = {
2138 .base = {
2139 .cra_name = "echainiv(authenc(hmac(sha224),"
2140 "cbc(des3_ede)))",
2141 .cra_driver_name = "echainiv-authenc-"
2142 "hmac-sha224-"
2143 "cbc-des3_ede-caam-qi2",
2144 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2146 .setkey = des3_aead_setkey,
2147 .setauthsize = aead_setauthsize,
2148 .encrypt = aead_encrypt,
2149 .decrypt = aead_decrypt,
2150 .ivsize = DES3_EDE_BLOCK_SIZE,
2151 .maxauthsize = SHA224_DIGEST_SIZE,
2153 .caam = {
2154 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2155 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2156 OP_ALG_AAI_HMAC_PRECOMP,
2157 .geniv = true,
2161 .aead = {
2162 .base = {
2163 .cra_name = "authenc(hmac(sha256),"
2164 "cbc(des3_ede))",
2165 .cra_driver_name = "authenc-hmac-sha256-"
2166 "cbc-des3_ede-caam-qi2",
2167 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2169 .setkey = des3_aead_setkey,
2170 .setauthsize = aead_setauthsize,
2171 .encrypt = aead_encrypt,
2172 .decrypt = aead_decrypt,
2173 .ivsize = DES3_EDE_BLOCK_SIZE,
2174 .maxauthsize = SHA256_DIGEST_SIZE,
2176 .caam = {
2177 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2178 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2179 OP_ALG_AAI_HMAC_PRECOMP,
2183 .aead = {
2184 .base = {
2185 .cra_name = "echainiv(authenc(hmac(sha256),"
2186 "cbc(des3_ede)))",
2187 .cra_driver_name = "echainiv-authenc-"
2188 "hmac-sha256-"
2189 "cbc-des3_ede-caam-qi2",
2190 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2192 .setkey = des3_aead_setkey,
2193 .setauthsize = aead_setauthsize,
2194 .encrypt = aead_encrypt,
2195 .decrypt = aead_decrypt,
2196 .ivsize = DES3_EDE_BLOCK_SIZE,
2197 .maxauthsize = SHA256_DIGEST_SIZE,
2199 .caam = {
2200 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2201 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2202 OP_ALG_AAI_HMAC_PRECOMP,
2203 .geniv = true,
2207 .aead = {
2208 .base = {
2209 .cra_name = "authenc(hmac(sha384),"
2210 "cbc(des3_ede))",
2211 .cra_driver_name = "authenc-hmac-sha384-"
2212 "cbc-des3_ede-caam-qi2",
2213 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2215 .setkey = des3_aead_setkey,
2216 .setauthsize = aead_setauthsize,
2217 .encrypt = aead_encrypt,
2218 .decrypt = aead_decrypt,
2219 .ivsize = DES3_EDE_BLOCK_SIZE,
2220 .maxauthsize = SHA384_DIGEST_SIZE,
2222 .caam = {
2223 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2224 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2225 OP_ALG_AAI_HMAC_PRECOMP,
2229 .aead = {
2230 .base = {
2231 .cra_name = "echainiv(authenc(hmac(sha384),"
2232 "cbc(des3_ede)))",
2233 .cra_driver_name = "echainiv-authenc-"
2234 "hmac-sha384-"
2235 "cbc-des3_ede-caam-qi2",
2236 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2238 .setkey = des3_aead_setkey,
2239 .setauthsize = aead_setauthsize,
2240 .encrypt = aead_encrypt,
2241 .decrypt = aead_decrypt,
2242 .ivsize = DES3_EDE_BLOCK_SIZE,
2243 .maxauthsize = SHA384_DIGEST_SIZE,
2245 .caam = {
2246 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2247 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2248 OP_ALG_AAI_HMAC_PRECOMP,
2249 .geniv = true,
2253 .aead = {
2254 .base = {
2255 .cra_name = "authenc(hmac(sha512),"
2256 "cbc(des3_ede))",
2257 .cra_driver_name = "authenc-hmac-sha512-"
2258 "cbc-des3_ede-caam-qi2",
2259 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2261 .setkey = des3_aead_setkey,
2262 .setauthsize = aead_setauthsize,
2263 .encrypt = aead_encrypt,
2264 .decrypt = aead_decrypt,
2265 .ivsize = DES3_EDE_BLOCK_SIZE,
2266 .maxauthsize = SHA512_DIGEST_SIZE,
2268 .caam = {
2269 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2270 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2271 OP_ALG_AAI_HMAC_PRECOMP,
2275 .aead = {
2276 .base = {
2277 .cra_name = "echainiv(authenc(hmac(sha512),"
2278 "cbc(des3_ede)))",
2279 .cra_driver_name = "echainiv-authenc-"
2280 "hmac-sha512-"
2281 "cbc-des3_ede-caam-qi2",
2282 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2284 .setkey = des3_aead_setkey,
2285 .setauthsize = aead_setauthsize,
2286 .encrypt = aead_encrypt,
2287 .decrypt = aead_decrypt,
2288 .ivsize = DES3_EDE_BLOCK_SIZE,
2289 .maxauthsize = SHA512_DIGEST_SIZE,
2291 .caam = {
2292 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2293 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2294 OP_ALG_AAI_HMAC_PRECOMP,
2295 .geniv = true,
2299 .aead = {
2300 .base = {
2301 .cra_name = "authenc(hmac(md5),cbc(des))",
2302 .cra_driver_name = "authenc-hmac-md5-"
2303 "cbc-des-caam-qi2",
2304 .cra_blocksize = DES_BLOCK_SIZE,
2306 .setkey = aead_setkey,
2307 .setauthsize = aead_setauthsize,
2308 .encrypt = aead_encrypt,
2309 .decrypt = aead_decrypt,
2310 .ivsize = DES_BLOCK_SIZE,
2311 .maxauthsize = MD5_DIGEST_SIZE,
2313 .caam = {
2314 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2315 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2316 OP_ALG_AAI_HMAC_PRECOMP,
2320 .aead = {
2321 .base = {
2322 .cra_name = "echainiv(authenc(hmac(md5),"
2323 "cbc(des)))",
2324 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2325 "cbc-des-caam-qi2",
2326 .cra_blocksize = DES_BLOCK_SIZE,
2328 .setkey = aead_setkey,
2329 .setauthsize = aead_setauthsize,
2330 .encrypt = aead_encrypt,
2331 .decrypt = aead_decrypt,
2332 .ivsize = DES_BLOCK_SIZE,
2333 .maxauthsize = MD5_DIGEST_SIZE,
2335 .caam = {
2336 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2337 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2338 OP_ALG_AAI_HMAC_PRECOMP,
2339 .geniv = true,
2343 .aead = {
2344 .base = {
2345 .cra_name = "authenc(hmac(sha1),cbc(des))",
2346 .cra_driver_name = "authenc-hmac-sha1-"
2347 "cbc-des-caam-qi2",
2348 .cra_blocksize = DES_BLOCK_SIZE,
2350 .setkey = aead_setkey,
2351 .setauthsize = aead_setauthsize,
2352 .encrypt = aead_encrypt,
2353 .decrypt = aead_decrypt,
2354 .ivsize = DES_BLOCK_SIZE,
2355 .maxauthsize = SHA1_DIGEST_SIZE,
2357 .caam = {
2358 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2359 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2360 OP_ALG_AAI_HMAC_PRECOMP,
2364 .aead = {
2365 .base = {
2366 .cra_name = "echainiv(authenc(hmac(sha1),"
2367 "cbc(des)))",
2368 .cra_driver_name = "echainiv-authenc-"
2369 "hmac-sha1-cbc-des-caam-qi2",
2370 .cra_blocksize = DES_BLOCK_SIZE,
2372 .setkey = aead_setkey,
2373 .setauthsize = aead_setauthsize,
2374 .encrypt = aead_encrypt,
2375 .decrypt = aead_decrypt,
2376 .ivsize = DES_BLOCK_SIZE,
2377 .maxauthsize = SHA1_DIGEST_SIZE,
2379 .caam = {
2380 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2381 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2382 OP_ALG_AAI_HMAC_PRECOMP,
2383 .geniv = true,
2387 .aead = {
2388 .base = {
2389 .cra_name = "authenc(hmac(sha224),cbc(des))",
2390 .cra_driver_name = "authenc-hmac-sha224-"
2391 "cbc-des-caam-qi2",
2392 .cra_blocksize = DES_BLOCK_SIZE,
2394 .setkey = aead_setkey,
2395 .setauthsize = aead_setauthsize,
2396 .encrypt = aead_encrypt,
2397 .decrypt = aead_decrypt,
2398 .ivsize = DES_BLOCK_SIZE,
2399 .maxauthsize = SHA224_DIGEST_SIZE,
2401 .caam = {
2402 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2403 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2404 OP_ALG_AAI_HMAC_PRECOMP,
2408 .aead = {
2409 .base = {
2410 .cra_name = "echainiv(authenc(hmac(sha224),"
2411 "cbc(des)))",
2412 .cra_driver_name = "echainiv-authenc-"
2413 "hmac-sha224-cbc-des-"
2414 "caam-qi2",
2415 .cra_blocksize = DES_BLOCK_SIZE,
2417 .setkey = aead_setkey,
2418 .setauthsize = aead_setauthsize,
2419 .encrypt = aead_encrypt,
2420 .decrypt = aead_decrypt,
2421 .ivsize = DES_BLOCK_SIZE,
2422 .maxauthsize = SHA224_DIGEST_SIZE,
2424 .caam = {
2425 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2426 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2427 OP_ALG_AAI_HMAC_PRECOMP,
2428 .geniv = true,
2432 .aead = {
2433 .base = {
2434 .cra_name = "authenc(hmac(sha256),cbc(des))",
2435 .cra_driver_name = "authenc-hmac-sha256-"
2436 "cbc-des-caam-qi2",
2437 .cra_blocksize = DES_BLOCK_SIZE,
2439 .setkey = aead_setkey,
2440 .setauthsize = aead_setauthsize,
2441 .encrypt = aead_encrypt,
2442 .decrypt = aead_decrypt,
2443 .ivsize = DES_BLOCK_SIZE,
2444 .maxauthsize = SHA256_DIGEST_SIZE,
2446 .caam = {
2447 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2448 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2449 OP_ALG_AAI_HMAC_PRECOMP,
2453 .aead = {
2454 .base = {
2455 .cra_name = "echainiv(authenc(hmac(sha256),"
2456 "cbc(des)))",
2457 .cra_driver_name = "echainiv-authenc-"
2458 "hmac-sha256-cbc-des-"
2459 "caam-qi2",
2460 .cra_blocksize = DES_BLOCK_SIZE,
2462 .setkey = aead_setkey,
2463 .setauthsize = aead_setauthsize,
2464 .encrypt = aead_encrypt,
2465 .decrypt = aead_decrypt,
2466 .ivsize = DES_BLOCK_SIZE,
2467 .maxauthsize = SHA256_DIGEST_SIZE,
2469 .caam = {
2470 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2471 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2472 OP_ALG_AAI_HMAC_PRECOMP,
2473 .geniv = true,
2477 .aead = {
2478 .base = {
2479 .cra_name = "authenc(hmac(sha384),cbc(des))",
2480 .cra_driver_name = "authenc-hmac-sha384-"
2481 "cbc-des-caam-qi2",
2482 .cra_blocksize = DES_BLOCK_SIZE,
2484 .setkey = aead_setkey,
2485 .setauthsize = aead_setauthsize,
2486 .encrypt = aead_encrypt,
2487 .decrypt = aead_decrypt,
2488 .ivsize = DES_BLOCK_SIZE,
2489 .maxauthsize = SHA384_DIGEST_SIZE,
2491 .caam = {
2492 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2493 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2494 OP_ALG_AAI_HMAC_PRECOMP,
2498 .aead = {
2499 .base = {
2500 .cra_name = "echainiv(authenc(hmac(sha384),"
2501 "cbc(des)))",
2502 .cra_driver_name = "echainiv-authenc-"
2503 "hmac-sha384-cbc-des-"
2504 "caam-qi2",
2505 .cra_blocksize = DES_BLOCK_SIZE,
2507 .setkey = aead_setkey,
2508 .setauthsize = aead_setauthsize,
2509 .encrypt = aead_encrypt,
2510 .decrypt = aead_decrypt,
2511 .ivsize = DES_BLOCK_SIZE,
2512 .maxauthsize = SHA384_DIGEST_SIZE,
2514 .caam = {
2515 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2516 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2517 OP_ALG_AAI_HMAC_PRECOMP,
2518 .geniv = true,
2522 .aead = {
2523 .base = {
2524 .cra_name = "authenc(hmac(sha512),cbc(des))",
2525 .cra_driver_name = "authenc-hmac-sha512-"
2526 "cbc-des-caam-qi2",
2527 .cra_blocksize = DES_BLOCK_SIZE,
2529 .setkey = aead_setkey,
2530 .setauthsize = aead_setauthsize,
2531 .encrypt = aead_encrypt,
2532 .decrypt = aead_decrypt,
2533 .ivsize = DES_BLOCK_SIZE,
2534 .maxauthsize = SHA512_DIGEST_SIZE,
2536 .caam = {
2537 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2538 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2539 OP_ALG_AAI_HMAC_PRECOMP,
2543 .aead = {
2544 .base = {
2545 .cra_name = "echainiv(authenc(hmac(sha512),"
2546 "cbc(des)))",
2547 .cra_driver_name = "echainiv-authenc-"
2548 "hmac-sha512-cbc-des-"
2549 "caam-qi2",
2550 .cra_blocksize = DES_BLOCK_SIZE,
2552 .setkey = aead_setkey,
2553 .setauthsize = aead_setauthsize,
2554 .encrypt = aead_encrypt,
2555 .decrypt = aead_decrypt,
2556 .ivsize = DES_BLOCK_SIZE,
2557 .maxauthsize = SHA512_DIGEST_SIZE,
2559 .caam = {
2560 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2561 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2562 OP_ALG_AAI_HMAC_PRECOMP,
2563 .geniv = true,
2567 .aead = {
2568 .base = {
2569 .cra_name = "authenc(hmac(md5),"
2570 "rfc3686(ctr(aes)))",
2571 .cra_driver_name = "authenc-hmac-md5-"
2572 "rfc3686-ctr-aes-caam-qi2",
2573 .cra_blocksize = 1,
2575 .setkey = aead_setkey,
2576 .setauthsize = aead_setauthsize,
2577 .encrypt = aead_encrypt,
2578 .decrypt = aead_decrypt,
2579 .ivsize = CTR_RFC3686_IV_SIZE,
2580 .maxauthsize = MD5_DIGEST_SIZE,
2582 .caam = {
2583 .class1_alg_type = OP_ALG_ALGSEL_AES |
2584 OP_ALG_AAI_CTR_MOD128,
2585 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2586 OP_ALG_AAI_HMAC_PRECOMP,
2587 .rfc3686 = true,
2591 .aead = {
2592 .base = {
2593 .cra_name = "seqiv(authenc("
2594 "hmac(md5),rfc3686(ctr(aes))))",
2595 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2596 "rfc3686-ctr-aes-caam-qi2",
2597 .cra_blocksize = 1,
2599 .setkey = aead_setkey,
2600 .setauthsize = aead_setauthsize,
2601 .encrypt = aead_encrypt,
2602 .decrypt = aead_decrypt,
2603 .ivsize = CTR_RFC3686_IV_SIZE,
2604 .maxauthsize = MD5_DIGEST_SIZE,
2606 .caam = {
2607 .class1_alg_type = OP_ALG_ALGSEL_AES |
2608 OP_ALG_AAI_CTR_MOD128,
2609 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2610 OP_ALG_AAI_HMAC_PRECOMP,
2611 .rfc3686 = true,
2612 .geniv = true,
2616 .aead = {
2617 .base = {
2618 .cra_name = "authenc(hmac(sha1),"
2619 "rfc3686(ctr(aes)))",
2620 .cra_driver_name = "authenc-hmac-sha1-"
2621 "rfc3686-ctr-aes-caam-qi2",
2622 .cra_blocksize = 1,
2624 .setkey = aead_setkey,
2625 .setauthsize = aead_setauthsize,
2626 .encrypt = aead_encrypt,
2627 .decrypt = aead_decrypt,
2628 .ivsize = CTR_RFC3686_IV_SIZE,
2629 .maxauthsize = SHA1_DIGEST_SIZE,
2631 .caam = {
2632 .class1_alg_type = OP_ALG_ALGSEL_AES |
2633 OP_ALG_AAI_CTR_MOD128,
2634 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2635 OP_ALG_AAI_HMAC_PRECOMP,
2636 .rfc3686 = true,
2640 .aead = {
2641 .base = {
2642 .cra_name = "seqiv(authenc("
2643 "hmac(sha1),rfc3686(ctr(aes))))",
2644 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2645 "rfc3686-ctr-aes-caam-qi2",
2646 .cra_blocksize = 1,
2648 .setkey = aead_setkey,
2649 .setauthsize = aead_setauthsize,
2650 .encrypt = aead_encrypt,
2651 .decrypt = aead_decrypt,
2652 .ivsize = CTR_RFC3686_IV_SIZE,
2653 .maxauthsize = SHA1_DIGEST_SIZE,
2655 .caam = {
2656 .class1_alg_type = OP_ALG_ALGSEL_AES |
2657 OP_ALG_AAI_CTR_MOD128,
2658 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2659 OP_ALG_AAI_HMAC_PRECOMP,
2660 .rfc3686 = true,
2661 .geniv = true,
2665 .aead = {
2666 .base = {
2667 .cra_name = "authenc(hmac(sha224),"
2668 "rfc3686(ctr(aes)))",
2669 .cra_driver_name = "authenc-hmac-sha224-"
2670 "rfc3686-ctr-aes-caam-qi2",
2671 .cra_blocksize = 1,
2673 .setkey = aead_setkey,
2674 .setauthsize = aead_setauthsize,
2675 .encrypt = aead_encrypt,
2676 .decrypt = aead_decrypt,
2677 .ivsize = CTR_RFC3686_IV_SIZE,
2678 .maxauthsize = SHA224_DIGEST_SIZE,
2680 .caam = {
2681 .class1_alg_type = OP_ALG_ALGSEL_AES |
2682 OP_ALG_AAI_CTR_MOD128,
2683 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2684 OP_ALG_AAI_HMAC_PRECOMP,
2685 .rfc3686 = true,
2689 .aead = {
2690 .base = {
2691 .cra_name = "seqiv(authenc("
2692 "hmac(sha224),rfc3686(ctr(aes))))",
2693 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2694 "rfc3686-ctr-aes-caam-qi2",
2695 .cra_blocksize = 1,
2697 .setkey = aead_setkey,
2698 .setauthsize = aead_setauthsize,
2699 .encrypt = aead_encrypt,
2700 .decrypt = aead_decrypt,
2701 .ivsize = CTR_RFC3686_IV_SIZE,
2702 .maxauthsize = SHA224_DIGEST_SIZE,
2704 .caam = {
2705 .class1_alg_type = OP_ALG_ALGSEL_AES |
2706 OP_ALG_AAI_CTR_MOD128,
2707 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2708 OP_ALG_AAI_HMAC_PRECOMP,
2709 .rfc3686 = true,
2710 .geniv = true,
2714 .aead = {
2715 .base = {
2716 .cra_name = "authenc(hmac(sha256),"
2717 "rfc3686(ctr(aes)))",
2718 .cra_driver_name = "authenc-hmac-sha256-"
2719 "rfc3686-ctr-aes-caam-qi2",
2720 .cra_blocksize = 1,
2722 .setkey = aead_setkey,
2723 .setauthsize = aead_setauthsize,
2724 .encrypt = aead_encrypt,
2725 .decrypt = aead_decrypt,
2726 .ivsize = CTR_RFC3686_IV_SIZE,
2727 .maxauthsize = SHA256_DIGEST_SIZE,
2729 .caam = {
2730 .class1_alg_type = OP_ALG_ALGSEL_AES |
2731 OP_ALG_AAI_CTR_MOD128,
2732 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2733 OP_ALG_AAI_HMAC_PRECOMP,
2734 .rfc3686 = true,
2738 .aead = {
2739 .base = {
2740 .cra_name = "seqiv(authenc(hmac(sha256),"
2741 "rfc3686(ctr(aes))))",
2742 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2743 "rfc3686-ctr-aes-caam-qi2",
2744 .cra_blocksize = 1,
2746 .setkey = aead_setkey,
2747 .setauthsize = aead_setauthsize,
2748 .encrypt = aead_encrypt,
2749 .decrypt = aead_decrypt,
2750 .ivsize = CTR_RFC3686_IV_SIZE,
2751 .maxauthsize = SHA256_DIGEST_SIZE,
2753 .caam = {
2754 .class1_alg_type = OP_ALG_ALGSEL_AES |
2755 OP_ALG_AAI_CTR_MOD128,
2756 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2757 OP_ALG_AAI_HMAC_PRECOMP,
2758 .rfc3686 = true,
2759 .geniv = true,
2763 .aead = {
2764 .base = {
2765 .cra_name = "authenc(hmac(sha384),"
2766 "rfc3686(ctr(aes)))",
2767 .cra_driver_name = "authenc-hmac-sha384-"
2768 "rfc3686-ctr-aes-caam-qi2",
2769 .cra_blocksize = 1,
2771 .setkey = aead_setkey,
2772 .setauthsize = aead_setauthsize,
2773 .encrypt = aead_encrypt,
2774 .decrypt = aead_decrypt,
2775 .ivsize = CTR_RFC3686_IV_SIZE,
2776 .maxauthsize = SHA384_DIGEST_SIZE,
2778 .caam = {
2779 .class1_alg_type = OP_ALG_ALGSEL_AES |
2780 OP_ALG_AAI_CTR_MOD128,
2781 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2782 OP_ALG_AAI_HMAC_PRECOMP,
2783 .rfc3686 = true,
2787 .aead = {
2788 .base = {
2789 .cra_name = "seqiv(authenc(hmac(sha384),"
2790 "rfc3686(ctr(aes))))",
2791 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2792 "rfc3686-ctr-aes-caam-qi2",
2793 .cra_blocksize = 1,
2795 .setkey = aead_setkey,
2796 .setauthsize = aead_setauthsize,
2797 .encrypt = aead_encrypt,
2798 .decrypt = aead_decrypt,
2799 .ivsize = CTR_RFC3686_IV_SIZE,
2800 .maxauthsize = SHA384_DIGEST_SIZE,
2802 .caam = {
2803 .class1_alg_type = OP_ALG_ALGSEL_AES |
2804 OP_ALG_AAI_CTR_MOD128,
2805 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2806 OP_ALG_AAI_HMAC_PRECOMP,
2807 .rfc3686 = true,
2808 .geniv = true,
2812 .aead = {
2813 .base = {
2814 .cra_name = "rfc7539(chacha20,poly1305)",
2815 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2816 "caam-qi2",
2817 .cra_blocksize = 1,
2819 .setkey = chachapoly_setkey,
2820 .setauthsize = chachapoly_setauthsize,
2821 .encrypt = aead_encrypt,
2822 .decrypt = aead_decrypt,
2823 .ivsize = CHACHAPOLY_IV_SIZE,
2824 .maxauthsize = POLY1305_DIGEST_SIZE,
2826 .caam = {
2827 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2828 OP_ALG_AAI_AEAD,
2829 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2830 OP_ALG_AAI_AEAD,
2831 .nodkp = true,
2835 .aead = {
2836 .base = {
2837 .cra_name = "rfc7539esp(chacha20,poly1305)",
2838 .cra_driver_name = "rfc7539esp-chacha20-"
2839 "poly1305-caam-qi2",
2840 .cra_blocksize = 1,
2842 .setkey = chachapoly_setkey,
2843 .setauthsize = chachapoly_setauthsize,
2844 .encrypt = aead_encrypt,
2845 .decrypt = aead_decrypt,
2846 .ivsize = 8,
2847 .maxauthsize = POLY1305_DIGEST_SIZE,
2849 .caam = {
2850 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2851 OP_ALG_AAI_AEAD,
2852 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2853 OP_ALG_AAI_AEAD,
2854 .nodkp = true,
2858 .aead = {
2859 .base = {
2860 .cra_name = "authenc(hmac(sha512),"
2861 "rfc3686(ctr(aes)))",
2862 .cra_driver_name = "authenc-hmac-sha512-"
2863 "rfc3686-ctr-aes-caam-qi2",
2864 .cra_blocksize = 1,
2866 .setkey = aead_setkey,
2867 .setauthsize = aead_setauthsize,
2868 .encrypt = aead_encrypt,
2869 .decrypt = aead_decrypt,
2870 .ivsize = CTR_RFC3686_IV_SIZE,
2871 .maxauthsize = SHA512_DIGEST_SIZE,
2873 .caam = {
2874 .class1_alg_type = OP_ALG_ALGSEL_AES |
2875 OP_ALG_AAI_CTR_MOD128,
2876 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2877 OP_ALG_AAI_HMAC_PRECOMP,
2878 .rfc3686 = true,
2882 .aead = {
2883 .base = {
2884 .cra_name = "seqiv(authenc(hmac(sha512),"
2885 "rfc3686(ctr(aes))))",
2886 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2887 "rfc3686-ctr-aes-caam-qi2",
2888 .cra_blocksize = 1,
2890 .setkey = aead_setkey,
2891 .setauthsize = aead_setauthsize,
2892 .encrypt = aead_encrypt,
2893 .decrypt = aead_decrypt,
2894 .ivsize = CTR_RFC3686_IV_SIZE,
2895 .maxauthsize = SHA512_DIGEST_SIZE,
2897 .caam = {
2898 .class1_alg_type = OP_ALG_ALGSEL_AES |
2899 OP_ALG_AAI_CTR_MOD128,
2900 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2901 OP_ALG_AAI_HMAC_PRECOMP,
2902 .rfc3686 = true,
2903 .geniv = true,
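/*
 * End of the AEAD driver templates. Illustrative sketch (not part of
 * this driver) of how a kernel user would reach one of the entries
 * above through the generic crypto API; the caam-qi2 implementation is
 * picked when it is registered and wins on priority:
 *
 *	struct crypto_aead *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_aead_setkey(tfm, key, keylen);
 *	if (!err)
 *		err = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	...
 *	crypto_free_aead(tfm);
 *
 * For authenc, the key blob passed to setkey carries both the
 * authentication key and the encryption key.
 */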
2908 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2910 struct skcipher_alg *alg = &t_alg->skcipher;
2912 alg->base.cra_module = THIS_MODULE;
2913 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2914 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2915 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2917 alg->init = caam_cra_init_skcipher;
2918 alg->exit = caam_cra_exit;
2921 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2923 struct aead_alg *alg = &t_alg->aead;
2925 alg->base.cra_module = THIS_MODULE;
2926 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2927 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2928 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2930 alg->init = caam_cra_init_aead;
2931 alg->exit = caam_cra_exit_aead;
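/*
 * The two init helpers above fill in the fields shared by every
 * template entry (module owner, priority, context size, flags), so the
 * tables only spell out per-algorithm data. CRYPTO_ALG_ASYNC
 * advertises asynchronous completion: requests return -EINPROGRESS and
 * finish later in the driver callbacks.
 */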
2934 /* max hash key is max split key size */
2935 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
2937 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
2939 /* caam context sizes for hashes: running digest + 8 */
2940 #define HASH_MSG_LEN 8
2941 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
2943 enum hash_optype {
2944 UPDATE = 0,
2945 UPDATE_FIRST,
2946 FINALIZE,
2947 DIGEST,
2948 HASH_NUM_OP
2952 * caam_hash_ctx - ahash per-session context
2953 * @flc: Flow Contexts array
2954 * @key: authentication key
2955 * @flc_dma: I/O virtual addresses of the Flow Contexts
2956 * @dev: dpseci device
2957 * @ctx_len: size of Context Register
2958 * @adata: hashing algorithm details
2960 struct caam_hash_ctx {
2961 struct caam_flc flc[HASH_NUM_OP];
2962 u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2963 dma_addr_t flc_dma[HASH_NUM_OP];
2964 struct device *dev;
2965 int ctx_len;
2966 struct alginfo adata;
2969 /* ahash state */
2970 struct caam_hash_state {
2971 struct caam_request caam_req;
2972 dma_addr_t buf_dma;
2973 dma_addr_t ctx_dma;
2974 int ctx_dma_len;
2975 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2976 int buflen;
2977 int next_buflen;
2978 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
2979 int (*update)(struct ahash_request *req);
2980 int (*final)(struct ahash_request *req);
2981 int (*finup)(struct ahash_request *req);
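/*
 * state->buf and next_buflen double-buffer the partial block that
 * cannot be hashed yet: each update sends only whole blocks to the
 * engine and carries the remainder in buf. The update/final/finup
 * pointers let the ahash entry points switch between the "no ctx" and
 * "ctx" implementations as the request progresses.
 */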
2984 struct caam_export_state {
2985 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
2986 u8 caam_ctx[MAX_CTX_LEN];
2987 int buflen;
2988 int (*update)(struct ahash_request *req);
2989 int (*final)(struct ahash_request *req);
2990 int (*finup)(struct ahash_request *req);
2993 /* Map the current buffer in state (if length > 0) and add it to the link table */
2994 static inline int buf_map_to_qm_sg(struct device *dev,
2995 struct dpaa2_sg_entry *qm_sg,
2996 struct caam_hash_state *state)
2998 int buflen = state->buflen;
3000 if (!buflen)
3001 return 0;
3003 state->buf_dma = dma_map_single(dev, state->buf, buflen,
3004 DMA_TO_DEVICE);
3005 if (dma_mapping_error(dev, state->buf_dma)) {
3006 dev_err(dev, "unable to map buf\n");
3007 state->buf_dma = 0;
3008 return -ENOMEM;
3011 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3013 return 0;
3016 /* Map state->caam_ctx and add it to the link table */
3017 static inline int ctx_map_to_qm_sg(struct device *dev,
3018 struct caam_hash_state *state, int ctx_len,
3019 struct dpaa2_sg_entry *qm_sg, u32 flag)
3021 state->ctx_dma_len = ctx_len;
3022 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3023 if (dma_mapping_error(dev, state->ctx_dma)) {
3024 dev_err(dev, "unable to map ctx\n");
3025 state->ctx_dma = 0;
3026 return -ENOMEM;
3029 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3031 return 0;
3034 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3036 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3037 int digestsize = crypto_ahash_digestsize(ahash);
3038 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3039 struct caam_flc *flc;
3040 u32 *desc;
3042 /* ahash_update shared descriptor */
3043 flc = &ctx->flc[UPDATE];
3044 desc = flc->sh_desc;
3045 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3046 ctx->ctx_len, true, priv->sec_attr.era);
3047 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3048 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3049 desc_bytes(desc), DMA_BIDIRECTIONAL);
3050 print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3051 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3054 /* ahash_update_first shared descriptor */
3055 flc = &ctx->flc[UPDATE_FIRST];
3056 desc = flc->sh_desc;
3057 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3058 ctx->ctx_len, false, priv->sec_attr.era);
3059 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3060 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3061 desc_bytes(desc), DMA_BIDIRECTIONAL);
3062 print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3063 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3066 /* ahash_final shared descriptor */
3067 flc = &ctx->flc[FINALIZE];
3068 desc = flc->sh_desc;
3069 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3070 ctx->ctx_len, true, priv->sec_attr.era);
3071 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3072 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3073 desc_bytes(desc), DMA_BIDIRECTIONAL);
3074 print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3075 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3078 /* ahash_digest shared descriptor */
3079 flc = &ctx->flc[DIGEST];
3080 desc = flc->sh_desc;
3081 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3082 ctx->ctx_len, false, priv->sec_attr.era);
3083 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3084 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3085 desc_bytes(desc), DMA_BIDIRECTIONAL);
3086 print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3087 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3090 return 0;
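/*
 * Summary of the four shared descriptors constructed above:
 *   UPDATE       - OP_ALG_AS_UPDATE,    imports ctx, exports ctx
 *   UPDATE_FIRST - OP_ALG_AS_INIT,      no ctx in,   exports ctx
 *   FINALIZE     - OP_ALG_AS_FINALIZE,  imports ctx, outputs digest
 *   DIGEST       - OP_ALG_AS_INITFINAL, no ctx in,   outputs digest
 */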
3093 struct split_key_sh_result {
3094 struct completion completion;
3095 int err;
3096 struct device *dev;
3099 static void split_key_sh_done(void *cbk_ctx, u32 err)
3101 struct split_key_sh_result *res = cbk_ctx;
3103 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3105 res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3106 complete(&res->completion);
3109 /* Digest the key down to digestsize when it is too long to be used directly */
3110 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3111 u32 digestsize)
3113 struct caam_request *req_ctx;
3114 u32 *desc;
3115 struct split_key_sh_result result;
3116 dma_addr_t key_dma;
3117 struct caam_flc *flc;
3118 dma_addr_t flc_dma;
3119 int ret = -ENOMEM;
3120 struct dpaa2_fl_entry *in_fle, *out_fle;
3122 req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3123 if (!req_ctx)
3124 return -ENOMEM;
3126 in_fle = &req_ctx->fd_flt[1];
3127 out_fle = &req_ctx->fd_flt[0];
3129 flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3130 if (!flc)
3131 goto err_flc;
3133 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3134 if (dma_mapping_error(ctx->dev, key_dma)) {
3135 dev_err(ctx->dev, "unable to map key memory\n");
3136 goto err_key_dma;
3139 desc = flc->sh_desc;
3141 init_sh_desc(desc, 0);
3143 /* descriptor to perform unkeyed hash on key_in */
3144 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3145 OP_ALG_AS_INITFINAL);
3146 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3147 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3148 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3149 LDST_SRCDST_BYTE_CONTEXT);
3151 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3152 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3153 desc_bytes(desc), DMA_TO_DEVICE);
3154 if (dma_mapping_error(ctx->dev, flc_dma)) {
3155 dev_err(ctx->dev, "unable to map shared descriptor\n");
3156 goto err_flc_dma;
3159 dpaa2_fl_set_final(in_fle, true);
3160 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3161 dpaa2_fl_set_addr(in_fle, key_dma);
3162 dpaa2_fl_set_len(in_fle, *keylen);
3163 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3164 dpaa2_fl_set_addr(out_fle, key_dma);
3165 dpaa2_fl_set_len(out_fle, digestsize);
3167 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3168 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3169 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3170 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3173 result.err = 0;
3174 init_completion(&result.completion);
3175 result.dev = ctx->dev;
3177 req_ctx->flc = flc;
3178 req_ctx->flc_dma = flc_dma;
3179 req_ctx->cbk = split_key_sh_done;
3180 req_ctx->ctx = &result;
3182 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3183 if (ret == -EINPROGRESS) {
3184 /* in progress */
3185 wait_for_completion(&result.completion);
3186 ret = result.err;
3187 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3188 DUMP_PREFIX_ADDRESS, 16, 4, key,
3189 digestsize, 1);
3192 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3193 DMA_TO_DEVICE);
3194 err_flc_dma:
3195 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3196 err_key_dma:
3197 kfree(flc);
3198 err_flc:
3199 kfree(req_ctx);
3201 *keylen = digestsize;
3203 return ret;
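/*
 * Note that hash_digest_key() digests the key in place: the input and
 * output frame list entries both point at key_dma, so on return the
 * caller's key buffer holds the digest and *keylen has shrunk to
 * digestsize.
 */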
3206 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3207 unsigned int keylen)
3209 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3210 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3211 unsigned int digestsize = crypto_ahash_digestsize(ahash);
3212 int ret;
3213 u8 *hashed_key = NULL;
3215 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3217 if (keylen > blocksize) {
3218 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3219 if (!hashed_key)
3220 return -ENOMEM;
3221 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3222 if (ret)
3223 goto bad_free_key;
3224 key = hashed_key;
3227 ctx->adata.keylen = keylen;
3228 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3229 OP_ALG_ALGSEL_MASK);
3230 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3231 goto bad_free_key;
3233 ctx->adata.key_virt = key;
3234 ctx->adata.key_inline = true;
3237 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3238 * in invalid opcodes (last bytes of user key) in the resulting
3239 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3240 * addresses are needed.
3242 if (keylen > ctx->adata.keylen_pad) {
3243 memcpy(ctx->key, key, keylen);
3244 dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3245 ctx->adata.keylen_pad,
3246 DMA_TO_DEVICE);
3249 ret = ahash_set_sh_desc(ahash);
3250 kfree(hashed_key);
3251 return ret;
3252 bad_free_key:
3253 kfree(hashed_key);
3254 return -EINVAL;
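/*
 * Pre-digesting over-long keys matches the standard HMAC construction
 * (RFC 2104). Worked example, assuming hmac(sha256): a 100-byte key
 * exceeds the 64-byte block size, so it is replaced by its 32-byte
 * SHA-256 digest before the split key is derived.
 */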
3257 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3258 struct ahash_request *req)
3260 struct caam_hash_state *state = ahash_request_ctx(req);
3262 if (edesc->src_nents)
3263 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3265 if (edesc->qm_sg_bytes)
3266 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3267 DMA_TO_DEVICE);
3269 if (state->buf_dma) {
3270 dma_unmap_single(dev, state->buf_dma, state->buflen,
3271 DMA_TO_DEVICE);
3272 state->buf_dma = 0;
3276 static inline void ahash_unmap_ctx(struct device *dev,
3277 struct ahash_edesc *edesc,
3278 struct ahash_request *req, u32 flag)
3280 struct caam_hash_state *state = ahash_request_ctx(req);
3282 if (state->ctx_dma) {
3283 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3284 state->ctx_dma = 0;
3286 ahash_unmap(dev, edesc, req);
3289 static void ahash_done(void *cbk_ctx, u32 status)
3291 struct crypto_async_request *areq = cbk_ctx;
3292 struct ahash_request *req = ahash_request_cast(areq);
3293 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3294 struct caam_hash_state *state = ahash_request_ctx(req);
3295 struct ahash_edesc *edesc = state->caam_req.edesc;
3296 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3297 int digestsize = crypto_ahash_digestsize(ahash);
3298 int ecode = 0;
3300 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3302 if (unlikely(status))
3303 ecode = caam_qi2_strstatus(ctx->dev, status);
3305 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3306 memcpy(req->result, state->caam_ctx, digestsize);
3307 qi_cache_free(edesc);
3309 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3310 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3311 ctx->ctx_len, 1);
3313 req->base.complete(&req->base, ecode);
3316 static void ahash_done_bi(void *cbk_ctx, u32 status)
3318 struct crypto_async_request *areq = cbk_ctx;
3319 struct ahash_request *req = ahash_request_cast(areq);
3320 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3321 struct caam_hash_state *state = ahash_request_ctx(req);
3322 struct ahash_edesc *edesc = state->caam_req.edesc;
3323 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3324 int ecode = 0;
3326 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3328 if (unlikely(status))
3329 ecode = caam_qi2_strstatus(ctx->dev, status);
3331 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3332 qi_cache_free(edesc);
3334 scatterwalk_map_and_copy(state->buf, req->src,
3335 req->nbytes - state->next_buflen,
3336 state->next_buflen, 0);
3337 state->buflen = state->next_buflen;
3339 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3340 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3341 state->buflen, 1);
3343 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3344 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3345 ctx->ctx_len, 1);
3346 if (req->result)
3347 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3348 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3349 crypto_ahash_digestsize(ahash), 1);
3351 req->base.complete(&req->base, ecode);
3354 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3356 struct crypto_async_request *areq = cbk_ctx;
3357 struct ahash_request *req = ahash_request_cast(areq);
3358 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3359 struct caam_hash_state *state = ahash_request_ctx(req);
3360 struct ahash_edesc *edesc = state->caam_req.edesc;
3361 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3362 int digestsize = crypto_ahash_digestsize(ahash);
3363 int ecode = 0;
3365 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3367 if (unlikely(status))
3368 ecode = caam_qi2_strstatus(ctx->dev, status);
3370 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3371 memcpy(req->result, state->caam_ctx, digestsize);
3372 qi_cache_free(edesc);
3374 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3375 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3376 ctx->ctx_len, 1);
3378 req->base.complete(&req->base, ecode);
3381 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3383 struct crypto_async_request *areq = cbk_ctx;
3384 struct ahash_request *req = ahash_request_cast(areq);
3385 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3386 struct caam_hash_state *state = ahash_request_ctx(req);
3387 struct ahash_edesc *edesc = state->caam_req.edesc;
3388 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3389 int ecode = 0;
3391 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3393 if (unlikely(status))
3394 ecode = caam_qi2_strstatus(ctx->dev, status);
3396 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3397 qi_cache_free(edesc);
3399 scatterwalk_map_and_copy(state->buf, req->src,
3400 req->nbytes - state->next_buflen,
3401 state->next_buflen, 0);
3402 state->buflen = state->next_buflen;
3404 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3405 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3406 state->buflen, 1);
3408 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3409 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3410 ctx->ctx_len, 1);
3411 if (req->result)
3412 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3413 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3414 crypto_ahash_digestsize(ahash), 1);
3416 req->base.complete(&req->base, ecode);
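/*
 * The four completion callbacks above differ only in the DMA direction
 * used for the context and in what is copied back:
 *   ahash_done         - ctx FROM_DEVICE,   digest copied to req->result
 *   ahash_done_bi      - ctx BIDIRECTIONAL, trailing bytes re-buffered
 *   ahash_done_ctx_src - ctx BIDIRECTIONAL, digest copied to req->result
 *   ahash_done_ctx_dst - ctx FROM_DEVICE,   trailing bytes re-buffered
 */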
3419 static int ahash_update_ctx(struct ahash_request *req)
3421 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3422 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3423 struct caam_hash_state *state = ahash_request_ctx(req);
3424 struct caam_request *req_ctx = &state->caam_req;
3425 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3426 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3427 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3428 GFP_KERNEL : GFP_ATOMIC;
3429 u8 *buf = state->buf;
3430 int *buflen = &state->buflen;
3431 int *next_buflen = &state->next_buflen;
3432 int in_len = *buflen + req->nbytes, to_hash;
3433 int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3434 struct ahash_edesc *edesc;
3435 int ret = 0;
3437 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3438 to_hash = in_len - *next_buflen;
3440 if (to_hash) {
3441 struct dpaa2_sg_entry *sg_table;
3442 int src_len = req->nbytes - *next_buflen;
3444 src_nents = sg_nents_for_len(req->src, src_len);
3445 if (src_nents < 0) {
3446 dev_err(ctx->dev, "Invalid number of src SG.\n");
3447 return src_nents;
3450 if (src_nents) {
3451 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3452 DMA_TO_DEVICE);
3453 if (!mapped_nents) {
3454 dev_err(ctx->dev, "unable to DMA map source\n");
3455 return -ENOMEM;
3457 } else {
3458 mapped_nents = 0;
3461 /* allocate space for base edesc and link tables */
3462 edesc = qi_cache_zalloc(GFP_DMA | flags);
3463 if (!edesc) {
3464 dma_unmap_sg(ctx->dev, req->src, src_nents,
3465 DMA_TO_DEVICE);
3466 return -ENOMEM;
3469 edesc->src_nents = src_nents;
3470 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3471 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3472 sizeof(*sg_table);
3473 sg_table = &edesc->sgt[0];
3475 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3476 DMA_BIDIRECTIONAL);
3477 if (ret)
3478 goto unmap_ctx;
3480 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3481 if (ret)
3482 goto unmap_ctx;
3484 if (mapped_nents) {
3485 sg_to_qm_sg_last(req->src, src_len,
3486 sg_table + qm_sg_src_index, 0);
3487 } else {
3488 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3489 true);
3492 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3493 qm_sg_bytes, DMA_TO_DEVICE);
3494 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3495 dev_err(ctx->dev, "unable to map S/G table\n");
3496 ret = -ENOMEM;
3497 goto unmap_ctx;
3499 edesc->qm_sg_bytes = qm_sg_bytes;
3501 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3502 dpaa2_fl_set_final(in_fle, true);
3503 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3504 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3505 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3506 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3507 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3508 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3510 req_ctx->flc = &ctx->flc[UPDATE];
3511 req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3512 req_ctx->cbk = ahash_done_bi;
3513 req_ctx->ctx = &req->base;
3514 req_ctx->edesc = edesc;
3516 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3517 if (ret != -EINPROGRESS &&
3518 !(ret == -EBUSY &&
3519 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3520 goto unmap_ctx;
3521 } else if (*next_buflen) {
3522 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3523 req->nbytes, 0);
3524 *buflen = *next_buflen;
3526 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3527 DUMP_PREFIX_ADDRESS, 16, 4, buf,
3528 *buflen, 1);
3531 return ret;
3532 unmap_ctx:
3533 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3534 qi_cache_free(edesc);
3535 return ret;
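/*
 * Buffering arithmetic used above, e.g. for a 64-byte block size with
 * 10 bytes already buffered and a 100-byte update: in_len = 110,
 * next_buflen = 110 & 63 = 46, to_hash = 64. The 64 block-aligned
 * bytes go to the engine; the trailing 46 are copied back into
 * state->buf by ahash_done_bi().
 */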
3538 static int ahash_final_ctx(struct ahash_request *req)
3540 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3541 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3542 struct caam_hash_state *state = ahash_request_ctx(req);
3543 struct caam_request *req_ctx = &state->caam_req;
3544 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3545 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3546 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3547 GFP_KERNEL : GFP_ATOMIC;
3548 int buflen = state->buflen;
3549 int qm_sg_bytes;
3550 int digestsize = crypto_ahash_digestsize(ahash);
3551 struct ahash_edesc *edesc;
3552 struct dpaa2_sg_entry *sg_table;
3553 int ret;
3555 /* allocate space for base edesc and link tables */
3556 edesc = qi_cache_zalloc(GFP_DMA | flags);
3557 if (!edesc)
3558 return -ENOMEM;
3560 qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3561 sg_table = &edesc->sgt[0];
3563 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3564 DMA_BIDIRECTIONAL);
3565 if (ret)
3566 goto unmap_ctx;
3568 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3569 if (ret)
3570 goto unmap_ctx;
3572 dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3574 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3575 DMA_TO_DEVICE);
3576 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3577 dev_err(ctx->dev, "unable to map S/G table\n");
3578 ret = -ENOMEM;
3579 goto unmap_ctx;
3581 edesc->qm_sg_bytes = qm_sg_bytes;
3583 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3584 dpaa2_fl_set_final(in_fle, true);
3585 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3586 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3587 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3588 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3589 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3590 dpaa2_fl_set_len(out_fle, digestsize);
3592 req_ctx->flc = &ctx->flc[FINALIZE];
3593 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3594 req_ctx->cbk = ahash_done_ctx_src;
3595 req_ctx->ctx = &req->base;
3596 req_ctx->edesc = edesc;
3598 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3599 if (ret == -EINPROGRESS ||
3600 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3601 return ret;
3603 unmap_ctx:
3604 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3605 qi_cache_free(edesc);
3606 return ret;
3609 static int ahash_finup_ctx(struct ahash_request *req)
3611 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3612 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3613 struct caam_hash_state *state = ahash_request_ctx(req);
3614 struct caam_request *req_ctx = &state->caam_req;
3615 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3616 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3617 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3618 GFP_KERNEL : GFP_ATOMIC;
3619 int buflen = state->buflen;
3620 int qm_sg_bytes, qm_sg_src_index;
3621 int src_nents, mapped_nents;
3622 int digestsize = crypto_ahash_digestsize(ahash);
3623 struct ahash_edesc *edesc;
3624 struct dpaa2_sg_entry *sg_table;
3625 int ret;
3627 src_nents = sg_nents_for_len(req->src, req->nbytes);
3628 if (src_nents < 0) {
3629 dev_err(ctx->dev, "Invalid number of src SG.\n");
3630 return src_nents;
3633 if (src_nents) {
3634 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3635 DMA_TO_DEVICE);
3636 if (!mapped_nents) {
3637 dev_err(ctx->dev, "unable to DMA map source\n");
3638 return -ENOMEM;
3640 } else {
3641 mapped_nents = 0;
3644 /* allocate space for base edesc and link tables */
3645 edesc = qi_cache_zalloc(GFP_DMA | flags);
3646 if (!edesc) {
3647 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3648 return -ENOMEM;
3651 edesc->src_nents = src_nents;
3652 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3653 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3654 sizeof(*sg_table);
3655 sg_table = &edesc->sgt[0];
3657 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3658 DMA_BIDIRECTIONAL);
3659 if (ret)
3660 goto unmap_ctx;
3662 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3663 if (ret)
3664 goto unmap_ctx;
3666 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3668 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3669 DMA_TO_DEVICE);
3670 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3671 dev_err(ctx->dev, "unable to map S/G table\n");
3672 ret = -ENOMEM;
3673 goto unmap_ctx;
3675 edesc->qm_sg_bytes = qm_sg_bytes;
3677 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3678 dpaa2_fl_set_final(in_fle, true);
3679 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3680 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3681 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3682 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3683 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3684 dpaa2_fl_set_len(out_fle, digestsize);
3686 req_ctx->flc = &ctx->flc[FINALIZE];
3687 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3688 req_ctx->cbk = ahash_done_ctx_src;
3689 req_ctx->ctx = &req->base;
3690 req_ctx->edesc = edesc;
3692 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3693 if (ret == -EINPROGRESS ||
3694 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3695 return ret;
3697 unmap_ctx:
3698 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3699 qi_cache_free(edesc);
3700 return ret;
3703 static int ahash_digest(struct ahash_request *req)
3705 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3706 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3707 struct caam_hash_state *state = ahash_request_ctx(req);
3708 struct caam_request *req_ctx = &state->caam_req;
3709 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3710 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3711 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3712 GFP_KERNEL : GFP_ATOMIC;
3713 int digestsize = crypto_ahash_digestsize(ahash);
3714 int src_nents, mapped_nents;
3715 struct ahash_edesc *edesc;
3716 int ret = -ENOMEM;
3718 state->buf_dma = 0;
3720 src_nents = sg_nents_for_len(req->src, req->nbytes);
3721 if (src_nents < 0) {
3722 dev_err(ctx->dev, "Invalid number of src SG.\n");
3723 return src_nents;
3726 if (src_nents) {
3727 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3728 DMA_TO_DEVICE);
3729 if (!mapped_nents) {
3730 dev_err(ctx->dev, "unable to DMA map source\n");
3731 return ret;
3733 } else {
3734 mapped_nents = 0;
3737 /* allocate space for base edesc and link tables */
3738 edesc = qi_cache_zalloc(GFP_DMA | flags);
3739 if (!edesc) {
3740 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3741 return ret;
3744 edesc->src_nents = src_nents;
3745 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3747 if (mapped_nents > 1) {
3748 int qm_sg_bytes;
3749 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3751 qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3752 sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3753 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3754 qm_sg_bytes, DMA_TO_DEVICE);
3755 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3756 dev_err(ctx->dev, "unable to map S/G table\n");
3757 goto unmap;
3759 edesc->qm_sg_bytes = qm_sg_bytes;
3760 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3761 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3762 } else {
3763 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3764 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3767 state->ctx_dma_len = digestsize;
3768 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3769 DMA_FROM_DEVICE);
3770 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3771 dev_err(ctx->dev, "unable to map ctx\n");
3772 state->ctx_dma = 0;
3773 goto unmap;
3776 dpaa2_fl_set_final(in_fle, true);
3777 dpaa2_fl_set_len(in_fle, req->nbytes);
3778 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3779 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3780 dpaa2_fl_set_len(out_fle, digestsize);
3782 req_ctx->flc = &ctx->flc[DIGEST];
3783 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3784 req_ctx->cbk = ahash_done;
3785 req_ctx->ctx = &req->base;
3786 req_ctx->edesc = edesc;
3787 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3788 if (ret == -EINPROGRESS ||
3789 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3790 return ret;
3792 unmap:
3793 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3794 qi_cache_free(edesc);
3795 return ret;
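/*
 * ahash_digest() is the one-shot path: a single mapped segment is fed
 * to the engine as a "single" format frame list entry, while multiple
 * segments are first gathered through a qm S/G table.
 */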
3798 static int ahash_final_no_ctx(struct ahash_request *req)
3800 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3801 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3802 struct caam_hash_state *state = ahash_request_ctx(req);
3803 struct caam_request *req_ctx = &state->caam_req;
3804 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3805 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3806 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3807 GFP_KERNEL : GFP_ATOMIC;
3808 u8 *buf = state->buf;
3809 int buflen = state->buflen;
3810 int digestsize = crypto_ahash_digestsize(ahash);
3811 struct ahash_edesc *edesc;
3812 int ret = -ENOMEM;
3814 /* allocate space for base edesc and link tables */
3815 edesc = qi_cache_zalloc(GFP_DMA | flags);
3816 if (!edesc)
3817 return ret;
3819 if (buflen) {
3820 state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3821 DMA_TO_DEVICE);
3822 if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3823 dev_err(ctx->dev, "unable to map src\n");
3824 goto unmap;
3828 state->ctx_dma_len = digestsize;
3829 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3830 DMA_FROM_DEVICE);
3831 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3832 dev_err(ctx->dev, "unable to map ctx\n");
3833 state->ctx_dma = 0;
3834 goto unmap;
3837 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3838 dpaa2_fl_set_final(in_fle, true);
3840 * The crypto engine requires the input entry to be present when the
3841 * "frame list" FD format is used.
3842 * Since the engine does not support FMT=2'b11 (unused entry type), leaving
3843 * in_fle zeroized (except for the "Final" flag) is the best option.
3845 if (buflen) {
3846 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3847 dpaa2_fl_set_addr(in_fle, state->buf_dma);
3848 dpaa2_fl_set_len(in_fle, buflen);
3850 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3851 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3852 dpaa2_fl_set_len(out_fle, digestsize);
3854 req_ctx->flc = &ctx->flc[DIGEST];
3855 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3856 req_ctx->cbk = ahash_done;
3857 req_ctx->ctx = &req->base;
3858 req_ctx->edesc = edesc;
3860 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3861 if (ret == -EINPROGRESS ||
3862 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3863 return ret;
3865 unmap:
3866 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3867 qi_cache_free(edesc);
3868 return ret;
3871 static int ahash_update_no_ctx(struct ahash_request *req)
3873 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3874 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3875 struct caam_hash_state *state = ahash_request_ctx(req);
3876 struct caam_request *req_ctx = &state->caam_req;
3877 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3878 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3879 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3880 GFP_KERNEL : GFP_ATOMIC;
3881 u8 *buf = state->buf;
3882 int *buflen = &state->buflen;
3883 int *next_buflen = &state->next_buflen;
3884 int in_len = *buflen + req->nbytes, to_hash;
3885 int qm_sg_bytes, src_nents, mapped_nents;
3886 struct ahash_edesc *edesc;
3887 int ret = 0;
3889 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3890 to_hash = in_len - *next_buflen;
3892 if (to_hash) {
3893 struct dpaa2_sg_entry *sg_table;
3894 int src_len = req->nbytes - *next_buflen;
3896 src_nents = sg_nents_for_len(req->src, src_len);
3897 if (src_nents < 0) {
3898 dev_err(ctx->dev, "Invalid number of src SG.\n");
3899 return src_nents;
3902 if (src_nents) {
3903 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3904 DMA_TO_DEVICE);
3905 if (!mapped_nents) {
3906 dev_err(ctx->dev, "unable to DMA map source\n");
3907 return -ENOMEM;
3909 } else {
3910 mapped_nents = 0;
3913 /* allocate space for base edesc and link tables */
3914 edesc = qi_cache_zalloc(GFP_DMA | flags);
3915 if (!edesc) {
3916 dma_unmap_sg(ctx->dev, req->src, src_nents,
3917 DMA_TO_DEVICE);
3918 return -ENOMEM;
3921 edesc->src_nents = src_nents;
3922 qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
3923 sizeof(*sg_table);
3924 sg_table = &edesc->sgt[0];
3926 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3927 if (ret)
3928 goto unmap_ctx;
3930 sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
3932 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3933 qm_sg_bytes, DMA_TO_DEVICE);
3934 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3935 dev_err(ctx->dev, "unable to map S/G table\n");
3936 ret = -ENOMEM;
3937 goto unmap_ctx;
3939 edesc->qm_sg_bytes = qm_sg_bytes;
3941 state->ctx_dma_len = ctx->ctx_len;
3942 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3943 ctx->ctx_len, DMA_FROM_DEVICE);
3944 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3945 dev_err(ctx->dev, "unable to map ctx\n");
3946 state->ctx_dma = 0;
3947 ret = -ENOMEM;
3948 goto unmap_ctx;
3951 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3952 dpaa2_fl_set_final(in_fle, true);
3953 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3954 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3955 dpaa2_fl_set_len(in_fle, to_hash);
3956 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3957 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3958 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3960 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3961 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3962 req_ctx->cbk = ahash_done_ctx_dst;
3963 req_ctx->ctx = &req->base;
3964 req_ctx->edesc = edesc;
3966 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3967 if (ret != -EINPROGRESS &&
3968 !(ret == -EBUSY &&
3969 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3970 goto unmap_ctx;
3972 state->update = ahash_update_ctx;
3973 state->finup = ahash_finup_ctx;
3974 state->final = ahash_final_ctx;
3975 } else if (*next_buflen) {
3976 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3977 req->nbytes, 0);
3978 *buflen = *next_buflen;
3980 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3981 DUMP_PREFIX_ADDRESS, 16, 4, buf,
3982 *buflen, 1);
3985 return ret;
3986 unmap_ctx:
3987 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
3988 qi_cache_free(edesc);
3989 return ret;
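/*
 * Once the first chunk is in flight the engine owns a running context
 * in state->caam_ctx, so the operation pointers above hand the request
 * over to the _ctx variants; while everything still fits in
 * state->buf, the no_ctx variants remain in charge.
 */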
3992 static int ahash_finup_no_ctx(struct ahash_request *req)
3994 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3995 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3996 struct caam_hash_state *state = ahash_request_ctx(req);
3997 struct caam_request *req_ctx = &state->caam_req;
3998 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3999 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4000 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4001 GFP_KERNEL : GFP_ATOMIC;
4002 int buflen = state->buflen;
4003 int qm_sg_bytes, src_nents, mapped_nents;
4004 int digestsize = crypto_ahash_digestsize(ahash);
4005 struct ahash_edesc *edesc;
4006 struct dpaa2_sg_entry *sg_table;
4007 int ret;
4009 src_nents = sg_nents_for_len(req->src, req->nbytes);
4010 if (src_nents < 0) {
4011 dev_err(ctx->dev, "Invalid number of src SG.\n");
4012 return src_nents;
4015 if (src_nents) {
4016 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4017 DMA_TO_DEVICE);
4018 if (!mapped_nents) {
4019 dev_err(ctx->dev, "unable to DMA map source\n");
4020 return -ENOMEM;
4022 } else {
4023 mapped_nents = 0;
4026 /* allocate space for base edesc and link tables */
4027 edesc = qi_cache_zalloc(GFP_DMA | flags);
4028 if (!edesc) {
4029 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4030 return -ENOMEM;
4033 edesc->src_nents = src_nents;
4034 qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4035 sg_table = &edesc->sgt[0];
4037 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4038 if (ret)
4039 goto unmap;
4041 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4043 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4044 DMA_TO_DEVICE);
4045 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4046 dev_err(ctx->dev, "unable to map S/G table\n");
4047 ret = -ENOMEM;
4048 goto unmap;
4050 edesc->qm_sg_bytes = qm_sg_bytes;
4052 state->ctx_dma_len = digestsize;
4053 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4054 DMA_FROM_DEVICE);
4055 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4056 dev_err(ctx->dev, "unable to map ctx\n");
4057 state->ctx_dma = 0;
4058 ret = -ENOMEM;
4059 goto unmap;
4062 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4063 dpaa2_fl_set_final(in_fle, true);
4064 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4065 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4066 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4067 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4068 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4069 dpaa2_fl_set_len(out_fle, digestsize);
4071 req_ctx->flc = &ctx->flc[DIGEST];
4072 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4073 req_ctx->cbk = ahash_done;
4074 req_ctx->ctx = &req->base;
4075 req_ctx->edesc = edesc;
4076 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4077 if (ret != -EINPROGRESS &&
4078 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4079 goto unmap;
4081 return ret;
4082 unmap:
4083 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4084 qi_cache_free(edesc);
4085 return ret;
4088 static int ahash_update_first(struct ahash_request *req)
4090 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4091 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4092 struct caam_hash_state *state = ahash_request_ctx(req);
4093 struct caam_request *req_ctx = &state->caam_req;
4094 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4095 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4096 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4097 GFP_KERNEL : GFP_ATOMIC;
4098 u8 *buf = state->buf;
4099 int *buflen = &state->buflen;
4100 int *next_buflen = &state->next_buflen;
4101 int to_hash;
4102 int src_nents, mapped_nents;
4103 struct ahash_edesc *edesc;
4104 int ret = 0;
4106 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4107 1);
4108 to_hash = req->nbytes - *next_buflen;
4110 if (to_hash) {
4111 struct dpaa2_sg_entry *sg_table;
4112 int src_len = req->nbytes - *next_buflen;
4114 src_nents = sg_nents_for_len(req->src, src_len);
4115 if (src_nents < 0) {
4116 dev_err(ctx->dev, "Invalid number of src SG.\n");
4117 return src_nents;
4120 if (src_nents) {
4121 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4122 DMA_TO_DEVICE);
4123 if (!mapped_nents) {
4124 dev_err(ctx->dev, "unable to map source for DMA\n");
4125 return -ENOMEM;
4127 } else {
4128 mapped_nents = 0;
4131 /* allocate space for base edesc and link tables */
4132 edesc = qi_cache_zalloc(GFP_DMA | flags);
4133 if (!edesc) {
4134 dma_unmap_sg(ctx->dev, req->src, src_nents,
4135 DMA_TO_DEVICE);
4136 return -ENOMEM;
4139 edesc->src_nents = src_nents;
4140 sg_table = &edesc->sgt[0];
4142 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4143 dpaa2_fl_set_final(in_fle, true);
4144 dpaa2_fl_set_len(in_fle, to_hash);
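/*
 * A single mapped segment can be handed to the engine directly as a
 * "single" frame list entry; multiple segments require building a
 * QM S/G table first.
 */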
4146 if (mapped_nents > 1) {
4147 int qm_sg_bytes;
4149 sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4150 qm_sg_bytes = pad_sg_nents(mapped_nents) *
4151 sizeof(*sg_table);
4152 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4153 qm_sg_bytes,
4154 DMA_TO_DEVICE);
4155 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4156 dev_err(ctx->dev, "unable to map S/G table\n");
4157 ret = -ENOMEM;
4158 goto unmap_ctx;
4160 edesc->qm_sg_bytes = qm_sg_bytes;
4161 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4162 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4163 } else {
4164 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4165 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4168 state->ctx_dma_len = ctx->ctx_len;
4169 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4170 ctx->ctx_len, DMA_FROM_DEVICE);
4171 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4172 dev_err(ctx->dev, "unable to map ctx\n");
4173 state->ctx_dma = 0;
4174 ret = -ENOMEM;
4175 goto unmap_ctx;
4178 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4179 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4180 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4182 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4183 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4184 req_ctx->cbk = ahash_done_ctx_dst;
4185 req_ctx->ctx = &req->base;
4186 req_ctx->edesc = edesc;
4188 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4189 if (ret != -EINPROGRESS &&
4190 !(ret == -EBUSY && req->base.flags &
4191 CRYPTO_TFM_REQ_MAY_BACKLOG))
4192 goto unmap_ctx;
4194 state->update = ahash_update_ctx;
4195 state->finup = ahash_finup_ctx;
4196 state->final = ahash_final_ctx;
4197 } else if (*next_buflen) {
4198 state->update = ahash_update_no_ctx;
4199 state->finup = ahash_finup_no_ctx;
4200 state->final = ahash_final_no_ctx;
4201 scatterwalk_map_and_copy(buf, req->src, 0,
4202 req->nbytes, 0);
4203 *buflen = *next_buflen;
4205 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4206 DUMP_PREFIX_ADDRESS, 16, 4, buf,
4207 *buflen, 1);
4210 return ret;
4211 unmap_ctx:
4212 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4213 qi_cache_free(edesc);
4214 return ret;
4217 static int ahash_finup_first(struct ahash_request *req)
4219 return ahash_digest(req);
4222 static int ahash_init(struct ahash_request *req)
4224 struct caam_hash_state *state = ahash_request_ctx(req);
4226 state->update = ahash_update_first;
4227 state->finup = ahash_finup_first;
4228 state->final = ahash_final_no_ctx;
4230 state->ctx_dma = 0;
4231 state->ctx_dma_len = 0;
4232 state->buf_dma = 0;
4233 state->buflen = 0;
4234 state->next_buflen = 0;
4236 return 0;
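/*
 * The per-request state starts in the "first" configuration set up by
 * ahash_init() and migrates as data flows in:
 *   update_first -> update_no_ctx -> update_ctx
 * depending on whether a running context has been produced yet.
 * ahash_update()/ahash_finup()/ahash_final() below simply dispatch
 * through these function pointers.
 */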
4239 static int ahash_update(struct ahash_request *req)
4241 struct caam_hash_state *state = ahash_request_ctx(req);
4243 return state->update(req);
4246 static int ahash_finup(struct ahash_request *req)
4248 struct caam_hash_state *state = ahash_request_ctx(req);
4250 return state->finup(req);
4253 static int ahash_final(struct ahash_request *req)
4255 struct caam_hash_state *state = ahash_request_ctx(req);
4257 return state->final(req);
4260 static int ahash_export(struct ahash_request *req, void *out)
4262 struct caam_hash_state *state = ahash_request_ctx(req);
4263 struct caam_export_state *export = out;
4264 u8 *buf = state->buf;
4265 int len = state->buflen;
4267 memcpy(export->buf, buf, len);
4268 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4269 export->buflen = len;
4270 export->update = state->update;
4271 export->final = state->final;
4272 export->finup = state->finup;
4274 return 0;
4277 static int ahash_import(struct ahash_request *req, const void *in)
4279 struct caam_hash_state *state = ahash_request_ctx(req);
4280 const struct caam_export_state *export = in;
4282 memset(state, 0, sizeof(*state));
4283 memcpy(state->buf, export->buf, export->buflen);
4284 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4285 state->buflen = export->buflen;
4286 state->update = export->update;
4287 state->final = export->final;
4288 state->finup = export->finup;
4290 return 0;
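/*
 * Export/import serialize the software side of the hash state: the
 * block buffer, the running context snapshot and the current
 * update/finup/final stage. This is what lets a partially hashed
 * request be frozen and resumed later, possibly on a different
 * request object.
 */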
4293 struct caam_hash_template {
4294 char name[CRYPTO_MAX_ALG_NAME];
4295 char driver_name[CRYPTO_MAX_ALG_NAME];
4296 char hmac_name[CRYPTO_MAX_ALG_NAME];
4297 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4298 unsigned int blocksize;
4299 struct ahash_alg template_ahash;
4300 u32 alg_type;
4303 /* ahash descriptors */
4304 static struct caam_hash_template driver_hash[] = {
4306 .name = "sha1",
4307 .driver_name = "sha1-caam-qi2",
4308 .hmac_name = "hmac(sha1)",
4309 .hmac_driver_name = "hmac-sha1-caam-qi2",
4310 .blocksize = SHA1_BLOCK_SIZE,
4311 .template_ahash = {
4312 .init = ahash_init,
4313 .update = ahash_update,
4314 .final = ahash_final,
4315 .finup = ahash_finup,
4316 .digest = ahash_digest,
4317 .export = ahash_export,
4318 .import = ahash_import,
4319 .setkey = ahash_setkey,
4320 .halg = {
4321 .digestsize = SHA1_DIGEST_SIZE,
4322 .statesize = sizeof(struct caam_export_state),
4325 .alg_type = OP_ALG_ALGSEL_SHA1,
4326 }, {
4327 .name = "sha224",
4328 .driver_name = "sha224-caam-qi2",
4329 .hmac_name = "hmac(sha224)",
4330 .hmac_driver_name = "hmac-sha224-caam-qi2",
4331 .blocksize = SHA224_BLOCK_SIZE,
4332 .template_ahash = {
4333 .init = ahash_init,
4334 .update = ahash_update,
4335 .final = ahash_final,
4336 .finup = ahash_finup,
4337 .digest = ahash_digest,
4338 .export = ahash_export,
4339 .import = ahash_import,
4340 .setkey = ahash_setkey,
4341 .halg = {
4342 .digestsize = SHA224_DIGEST_SIZE,
4343 .statesize = sizeof(struct caam_export_state),
4346 .alg_type = OP_ALG_ALGSEL_SHA224,
4347 }, {
4348 .name = "sha256",
4349 .driver_name = "sha256-caam-qi2",
4350 .hmac_name = "hmac(sha256)",
4351 .hmac_driver_name = "hmac-sha256-caam-qi2",
4352 .blocksize = SHA256_BLOCK_SIZE,
4353 .template_ahash = {
4354 .init = ahash_init,
4355 .update = ahash_update,
4356 .final = ahash_final,
4357 .finup = ahash_finup,
4358 .digest = ahash_digest,
4359 .export = ahash_export,
4360 .import = ahash_import,
4361 .setkey = ahash_setkey,
4362 .halg = {
4363 .digestsize = SHA256_DIGEST_SIZE,
4364 .statesize = sizeof(struct caam_export_state),
4367 .alg_type = OP_ALG_ALGSEL_SHA256,
4368 }, {
4369 .name = "sha384",
4370 .driver_name = "sha384-caam-qi2",
4371 .hmac_name = "hmac(sha384)",
4372 .hmac_driver_name = "hmac-sha384-caam-qi2",
4373 .blocksize = SHA384_BLOCK_SIZE,
4374 .template_ahash = {
4375 .init = ahash_init,
4376 .update = ahash_update,
4377 .final = ahash_final,
4378 .finup = ahash_finup,
4379 .digest = ahash_digest,
4380 .export = ahash_export,
4381 .import = ahash_import,
4382 .setkey = ahash_setkey,
4383 .halg = {
4384 .digestsize = SHA384_DIGEST_SIZE,
4385 .statesize = sizeof(struct caam_export_state),
4388 .alg_type = OP_ALG_ALGSEL_SHA384,
4389 }, {
4390 .name = "sha512",
4391 .driver_name = "sha512-caam-qi2",
4392 .hmac_name = "hmac(sha512)",
4393 .hmac_driver_name = "hmac-sha512-caam-qi2",
4394 .blocksize = SHA512_BLOCK_SIZE,
4395 .template_ahash = {
4396 .init = ahash_init,
4397 .update = ahash_update,
4398 .final = ahash_final,
4399 .finup = ahash_finup,
4400 .digest = ahash_digest,
4401 .export = ahash_export,
4402 .import = ahash_import,
4403 .setkey = ahash_setkey,
4404 .halg = {
4405 .digestsize = SHA512_DIGEST_SIZE,
4406 .statesize = sizeof(struct caam_export_state),
4409 .alg_type = OP_ALG_ALGSEL_SHA512,
4410 }, {
4411 .name = "md5",
4412 .driver_name = "md5-caam-qi2",
4413 .hmac_name = "hmac(md5)",
4414 .hmac_driver_name = "hmac-md5-caam-qi2",
4415 .blocksize = MD5_BLOCK_WORDS * 4,
4416 .template_ahash = {
4417 .init = ahash_init,
4418 .update = ahash_update,
4419 .final = ahash_final,
4420 .finup = ahash_finup,
4421 .digest = ahash_digest,
4422 .export = ahash_export,
4423 .import = ahash_import,
4424 .setkey = ahash_setkey,
4425 .halg = {
4426 .digestsize = MD5_DIGEST_SIZE,
4427 .statesize = sizeof(struct caam_export_state),
4430 .alg_type = OP_ALG_ALGSEL_MD5,
4434 struct caam_hash_alg {
4435 struct list_head entry;
4436 struct device *dev;
4437 int alg_type;
4438 struct ahash_alg ahash_alg;
4441 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4443 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4444 struct crypto_alg *base = tfm->__crt_alg;
4445 struct hash_alg_common *halg =
4446 container_of(base, struct hash_alg_common, base);
4447 struct ahash_alg *alg =
4448 container_of(halg, struct ahash_alg, halg);
4449 struct caam_hash_alg *caam_hash =
4450 container_of(alg, struct caam_hash_alg, ahash_alg);
4451 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4452 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4453 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4454 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4455 HASH_MSG_LEN + 32,
4456 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4457 HASH_MSG_LEN + 64,
4458 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
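/*
 * The MDHA running context is the intermediate digest plus the
 * message length counter (HASH_MSG_LEN, an 8-byte count), hence e.g.
 * HASH_MSG_LEN + SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes for SHA-256.
 * The bare "+ 32" and "+ 64" entries stand in for the SHA-224 and
 * SHA-384 running digests, which use the full SHA-256/SHA-512
 * internal state.
 */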
4459 dma_addr_t dma_addr;
4460 int i;
4462 ctx->dev = caam_hash->dev;
4464 if (alg->setkey) {
4465 ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4466 ARRAY_SIZE(ctx->key),
4467 DMA_TO_DEVICE,
4468 DMA_ATTR_SKIP_CPU_SYNC);
4469 if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4470 dev_err(ctx->dev, "unable to map key\n");
4471 return -ENOMEM;
4475 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4476 DMA_BIDIRECTIONAL,
4477 DMA_ATTR_SKIP_CPU_SYNC);
4478 if (dma_mapping_error(ctx->dev, dma_addr)) {
4479 dev_err(ctx->dev, "unable to map shared descriptors\n");
4480 if (ctx->adata.key_dma)
4481 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4482 ARRAY_SIZE(ctx->key),
4483 DMA_TO_DEVICE,
4484 DMA_ATTR_SKIP_CPU_SYNC);
4485 return -ENOMEM;
4488 for (i = 0; i < HASH_NUM_OP; i++)
4489 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4491 /* copy descriptor header template value */
4492 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4494 ctx->ctx_len = runninglen[(ctx->adata.algtype &
4495 OP_ALG_ALGSEL_SUBMASK) >>
4496 OP_ALG_ALGSEL_SHIFT];
4498 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4499 sizeof(struct caam_hash_state));
4501 return ahash_set_sh_desc(ahash);
4504 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4506 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4508 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4509 DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4510 if (ctx->adata.key_dma)
4511 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4512 ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4513 DMA_ATTR_SKIP_CPU_SYNC);
4516 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4517 struct caam_hash_template *template, bool keyed)
4519 struct caam_hash_alg *t_alg;
4520 struct ahash_alg *halg;
4521 struct crypto_alg *alg;
4523 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4524 if (!t_alg)
4525 return ERR_PTR(-ENOMEM);
4527 t_alg->ahash_alg = template->template_ahash;
4528 halg = &t_alg->ahash_alg;
4529 alg = &halg->halg.base;
4531 if (keyed) {
4532 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4533 template->hmac_name);
4534 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4535 template->hmac_driver_name);
4536 } else {
4537 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4538 template->name);
4539 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4540 template->driver_name);
4541 t_alg->ahash_alg.setkey = NULL;
4543 alg->cra_module = THIS_MODULE;
4544 alg->cra_init = caam_hash_cra_init;
4545 alg->cra_exit = caam_hash_cra_exit;
4546 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4547 alg->cra_priority = CAAM_CRA_PRIORITY;
4548 alg->cra_blocksize = template->blocksize;
4549 alg->cra_alignmask = 0;
4550 alg->cra_flags = CRYPTO_ALG_ASYNC;
4552 t_alg->alg_type = template->alg_type;
4553 t_alg->dev = dev;
4555 return t_alg;
4558 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4560 struct dpaa2_caam_priv_per_cpu *ppriv;
4562 ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4563 napi_schedule_irqoff(&ppriv->napi);
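/*
 * FQ data-available notifications arrive with interrupts disabled
 * (hence napi_schedule_irqoff()); all frame processing is deferred to
 * the per-CPU NAPI poller scheduled here.
 */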
4566 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4568 struct device *dev = priv->dev;
4569 struct dpaa2_io_notification_ctx *nctx;
4570 struct dpaa2_caam_priv_per_cpu *ppriv;
4571 int err, i = 0, cpu;
4573 for_each_online_cpu(cpu) {
4574 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4575 ppriv->priv = priv;
4576 nctx = &ppriv->nctx;
4577 nctx->is_cdan = 0;
4578 nctx->id = ppriv->rsp_fqid;
4579 nctx->desired_cpu = cpu;
4580 nctx->cb = dpaa2_caam_fqdan_cb;
4582 /* Register notification callbacks */
4583 ppriv->dpio = dpaa2_io_service_select(cpu);
4584 err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4585 if (unlikely(err)) {
4586 dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4587 nctx->cb = NULL;
4588 /*
4589 * If no affine DPIO for this core, there's probably
4590 * none available for next cores either. Signal we want
4591 * to retry later, in case the DPIO devices weren't
4592 * probed yet.
4593 */
4594 err = -EPROBE_DEFER;
4595 goto err;
4598 ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4599 dev);
4600 if (unlikely(!ppriv->store)) {
4601 dev_err(dev, "dpaa2_io_store_create() failed\n");
4602 err = -ENOMEM;
4603 goto err;
4606 if (++i == priv->num_pairs)
4607 break;
4610 return 0;
4612 err:
4613 for_each_online_cpu(cpu) {
4614 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4615 if (!ppriv->nctx.cb)
4616 break;
4617 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4620 for_each_online_cpu(cpu) {
4621 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4622 if (!ppriv->store)
4623 break;
4624 dpaa2_io_store_destroy(ppriv->store);
4627 return err;
4630 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4632 struct dpaa2_caam_priv_per_cpu *ppriv;
4633 int i = 0, cpu;
4635 for_each_online_cpu(cpu) {
4636 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4637 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4638 priv->dev);
4639 dpaa2_io_store_destroy(ppriv->store);
4641 if (++i == priv->num_pairs)
4642 return;
4646 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4648 struct dpseci_rx_queue_cfg rx_queue_cfg;
4649 struct device *dev = priv->dev;
4650 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4651 struct dpaa2_caam_priv_per_cpu *ppriv;
4652 int err = 0, i = 0, cpu;
4654 /* Configure Rx queues */
4655 for_each_online_cpu(cpu) {
4656 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4658 rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4659 DPSECI_QUEUE_OPT_USER_CTX;
4660 rx_queue_cfg.order_preservation_en = 0;
4661 rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4662 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4663 /*
4664 * Rx priority (WQ) doesn't really matter, since we use
4665 * pull mode, i.e. volatile dequeues from specific FQs
4666 */
4667 rx_queue_cfg.dest_cfg.priority = 0;
4668 rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4670 err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4671 &rx_queue_cfg);
4672 if (err) {
4673 dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4674 err);
4675 return err;
4678 if (++i == priv->num_pairs)
4679 break;
4682 return err;
4685 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4687 struct device *dev = priv->dev;
4689 if (!priv->cscn_mem)
4690 return;
4692 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4693 kfree(priv->cscn_mem);
4696 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4698 struct device *dev = priv->dev;
4699 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4701 dpaa2_dpseci_congestion_free(priv);
4702 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4705 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4706 const struct dpaa2_fd *fd)
4708 struct caam_request *req;
4709 u32 fd_err;
4711 if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4712 dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4713 return;
4716 fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4717 if (unlikely(fd_err))
4718 dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4720 /*
4721 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4722 * in FD[ERR] or FD[FRC].
4723 */
4724 req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4725 dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4726 DMA_BIDIRECTIONAL);
4727 req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4730 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4732 int err;
4734 /* Retry while portal is busy */
4735 do {
4736 err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4737 ppriv->store);
4738 } while (err == -EBUSY);
4740 if (unlikely(err))
4741 dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
4743 return err;
4746 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4748 struct dpaa2_dq *dq;
4749 int cleaned = 0, is_last;
4751 do {
4752 dq = dpaa2_io_store_next(ppriv->store, &is_last);
4753 if (unlikely(!dq)) {
4754 if (unlikely(!is_last)) {
4755 dev_dbg(ppriv->priv->dev,
4756 "FQ %d returned no valid frames\n",
4757 ppriv->rsp_fqid);
4758 /*
4759 * MUST retry until we get some sort of
4760 * valid response token (be it "empty dequeue"
4761 * or a valid frame).
4762 */
4763 continue;
4765 break;
4768 /* Process FD */
4769 dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4770 cleaned++;
4771 } while (!is_last);
4773 return cleaned;
4776 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4778 struct dpaa2_caam_priv_per_cpu *ppriv;
4779 struct dpaa2_caam_priv *priv;
4780 int err, cleaned = 0, store_cleaned;
4782 ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4783 priv = ppriv->priv;
4785 if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4786 return 0;
4788 do {
4789 store_cleaned = dpaa2_caam_store_consume(ppriv);
4790 cleaned += store_cleaned;
4792 if (store_cleaned == 0 ||
4793 cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4794 break;
4796 /* Try to dequeue some more */
4797 err = dpaa2_caam_pull_fq(ppriv);
4798 if (unlikely(err))
4799 break;
4800 } while (1);
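/*
 * Each pull can return up to DPAA2_CAAM_STORE_SIZE frames, so the loop
 * above stops requesting more once another full store could overshoot
 * the NAPI budget; the "cleaned < budget" check below then decides
 * whether to re-arm notifications or stay in polling mode.
 */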
4802 if (cleaned < budget) {
4803 napi_complete_done(napi, cleaned);
4804 err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4805 if (unlikely(err))
4806 dev_err(priv->dev, "Notification rearm failed: %d\n",
4807 err);
4810 return cleaned;
4813 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4814 u16 token)
4816 struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4817 struct device *dev = priv->dev;
4818 int err;
4820 /*
4821 * Congestion group feature supported starting with DPSECI API v5.1
4822 * and only when object has been created with this capability.
4823 */
4824 if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4825 !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4826 return 0;
4828 priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4829 GFP_KERNEL | GFP_DMA);
4830 if (!priv->cscn_mem)
4831 return -ENOMEM;
4833 priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4834 priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4835 DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4836 if (dma_mapping_error(dev, priv->cscn_dma)) {
4837 dev_err(dev, "Error mapping CSCN memory area\n");
4838 err = -ENOMEM;
4839 goto err_dma_map;
4842 cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4843 cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4844 cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4845 cong_notif_cfg.message_ctx = (uintptr_t)priv;
4846 cong_notif_cfg.message_iova = priv->cscn_dma;
4847 cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4848 DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4849 DPSECI_CGN_MODE_COHERENT_WRITE;
4851 err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4852 &cong_notif_cfg);
4853 if (err) {
4854 dev_err(dev, "dpseci_set_congestion_notification failed\n");
4855 goto err_set_cong;
4858 return 0;
4860 err_set_cong:
4861 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4862 err_dma_map:
4863 kfree(priv->cscn_mem);
4865 return err;
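/*
 * The congestion state change notification (CSCN) area registered
 * above is written by the hardware on entering/exiting congestion
 * (WRITE_MEM_ON_ENTER/EXIT); dpaa2_caam_enqueue() polls it and fails
 * fast with -EBUSY while the group is congested.
 */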
4868 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4870 struct device *dev = &ls_dev->dev;
4871 struct dpaa2_caam_priv *priv;
4872 struct dpaa2_caam_priv_per_cpu *ppriv;
4873 int err, cpu;
4874 u8 i;
4876 priv = dev_get_drvdata(dev);
4878 priv->dev = dev;
4879 priv->dpsec_id = ls_dev->obj_desc.id;
4881 /* Get a handle for the DPSECI this interface is associated with */
4882 err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4883 if (err) {
4884 dev_err(dev, "dpseci_open() failed: %d\n", err);
4885 goto err_open;
4888 err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4889 &priv->minor_ver);
4890 if (err) {
4891 dev_err(dev, "dpseci_get_api_version() failed\n");
4892 goto err_get_vers;
4895 dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
4897 err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
4898 &priv->dpseci_attr);
4899 if (err) {
4900 dev_err(dev, "dpseci_get_attributes() failed\n");
4901 goto err_get_vers;
4904 err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
4905 &priv->sec_attr);
4906 if (err) {
4907 dev_err(dev, "dpseci_get_sec_attr() failed\n");
4908 goto err_get_vers;
4911 err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
4912 if (err) {
4913 dev_err(dev, "setup_congestion() failed\n");
4914 goto err_get_vers;
4917 priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
4918 priv->dpseci_attr.num_tx_queues);
4919 if (priv->num_pairs > num_online_cpus()) {
4920 dev_warn(dev, "%d queues won't be used\n",
4921 priv->num_pairs - num_online_cpus());
4922 priv->num_pairs = num_online_cpus();
4925 for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
4926 err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4927 &priv->rx_queue_attr[i]);
4928 if (err) {
4929 dev_err(dev, "dpseci_get_rx_queue() failed\n");
4930 goto err_get_rx_queue;
4934 for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
4935 err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4936 &priv->tx_queue_attr[i]);
4937 if (err) {
4938 dev_err(dev, "dpseci_get_tx_queue() failed\n");
4939 goto err_get_rx_queue;
4943 i = 0;
4944 for_each_online_cpu(cpu) {
4945 u8 j;
4947 j = i % priv->num_pairs;
4949 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4950 ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
4952 /*
4953 * Allow all cores to enqueue, while only some of them
4954 * will take part in dequeuing.
4955 */
4956 if (++i > priv->num_pairs)
4957 continue;
4959 ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
4960 ppriv->prio = j;
4962 dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
4963 priv->rx_queue_attr[j].fqid,
4964 priv->tx_queue_attr[j].fqid);
4966 ppriv->net_dev.dev = *dev;
4967 INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
4968 netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
4969 DPAA2_CAAM_NAPI_WEIGHT);
4972 return 0;
4974 err_get_rx_queue:
4975 dpaa2_dpseci_congestion_free(priv);
4976 err_get_vers:
4977 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4978 err_open:
4979 return err;
4982 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
4984 struct device *dev = priv->dev;
4985 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4986 struct dpaa2_caam_priv_per_cpu *ppriv;
4987 int i;
4989 for (i = 0; i < priv->num_pairs; i++) {
4990 ppriv = per_cpu_ptr(priv->ppriv, i);
4991 napi_enable(&ppriv->napi);
4994 return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
4997 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
4999 struct device *dev = priv->dev;
5000 struct dpaa2_caam_priv_per_cpu *ppriv;
5001 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5002 int i, err = 0, enabled;
5004 err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
5005 if (err) {
5006 dev_err(dev, "dpseci_disable() failed\n");
5007 return err;
5010 err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
5011 if (err) {
5012 dev_err(dev, "dpseci_is_enabled() failed\n");
5013 return err;
5016 dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5018 for (i = 0; i < priv->num_pairs; i++) {
5019 ppriv = per_cpu_ptr(priv->ppriv, i);
5020 napi_disable(&ppriv->napi);
5021 netif_napi_del(&ppriv->napi);
5024 return 0;
5027 static struct list_head hash_list;
5029 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5031 struct device *dev;
5032 struct dpaa2_caam_priv *priv;
5033 int i, err = 0;
5034 bool registered = false;
5036 /*
5037 * There is no way to get CAAM endianness - there is no direct register
5038 * space access and MC f/w does not provide this attribute.
5039 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
5040 * property.
5041 */
5042 caam_little_end = true;
5044 caam_imx = false;
5046 dev = &dpseci_dev->dev;
5048 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5049 if (!priv)
5050 return -ENOMEM;
5052 dev_set_drvdata(dev, priv);
5054 priv->domain = iommu_get_domain_for_dev(dev);
5056 qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5057 0, SLAB_CACHE_DMA, NULL);
5058 if (!qi_cache) {
5059 dev_err(dev, "Can't allocate SEC cache\n");
5060 return -ENOMEM;
5063 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5064 if (err) {
5065 dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5066 goto err_dma_mask;
5069 /* Obtain a MC portal */
5070 err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5071 if (err) {
5072 if (err == -ENXIO)
5073 err = -EPROBE_DEFER;
5074 else
5075 dev_err(dev, "MC portal allocation failed\n");
5077 goto err_dma_mask;
5080 priv->ppriv = alloc_percpu(*priv->ppriv);
5081 if (!priv->ppriv) {
5082 dev_err(dev, "alloc_percpu() failed\n");
5083 err = -ENOMEM;
5084 goto err_alloc_ppriv;
5087 /* DPSECI initialization */
5088 err = dpaa2_dpseci_setup(dpseci_dev);
5089 if (err) {
5090 dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5091 goto err_dpseci_setup;
5094 /* DPIO */
5095 err = dpaa2_dpseci_dpio_setup(priv);
5096 if (err) {
5097 if (err != -EPROBE_DEFER)
5098 dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
5099 goto err_dpio_setup;
5102 /* DPSECI binding to DPIO */
5103 err = dpaa2_dpseci_bind(priv);
5104 if (err) {
5105 dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5106 goto err_bind;
5109 /* DPSECI enable */
5110 err = dpaa2_dpseci_enable(priv);
5111 if (err) {
5112 dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5113 goto err_bind;
5116 dpaa2_dpseci_debugfs_init(priv);
5118 /* register crypto algorithms the device supports */
5119 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5120 struct caam_skcipher_alg *t_alg = driver_algs + i;
5121 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5123 /* Skip DES algorithms if not supported by device */
5124 if (!priv->sec_attr.des_acc_num &&
5125 (alg_sel == OP_ALG_ALGSEL_3DES ||
5126 alg_sel == OP_ALG_ALGSEL_DES))
5127 continue;
5129 /* Skip AES algorithms if not supported by device */
5130 if (!priv->sec_attr.aes_acc_num &&
5131 alg_sel == OP_ALG_ALGSEL_AES)
5132 continue;
5134 /* Skip CHACHA20 algorithms if not supported by device */
5135 if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5136 !priv->sec_attr.ccha_acc_num)
5137 continue;
5139 t_alg->caam.dev = dev;
5140 caam_skcipher_alg_init(t_alg);
5142 err = crypto_register_skcipher(&t_alg->skcipher);
5143 if (err) {
5144 dev_warn(dev, "%s alg registration failed: %d\n",
5145 t_alg->skcipher.base.cra_driver_name, err);
5146 continue;
5149 t_alg->registered = true;
5150 registered = true;
5153 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5154 struct caam_aead_alg *t_alg = driver_aeads + i;
5155 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5156 OP_ALG_ALGSEL_MASK;
5157 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5158 OP_ALG_ALGSEL_MASK;
5160 /* Skip DES algorithms if not supported by device */
5161 if (!priv->sec_attr.des_acc_num &&
5162 (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5163 c1_alg_sel == OP_ALG_ALGSEL_DES))
5164 continue;
5166 /* Skip AES algorithms if not supported by device */
5167 if (!priv->sec_attr.aes_acc_num &&
5168 c1_alg_sel == OP_ALG_ALGSEL_AES)
5169 continue;
5171 /* Skip CHACHA20 algorithms if not supported by device */
5172 if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5173 !priv->sec_attr.ccha_acc_num)
5174 continue;
5176 /* Skip POLY1305 algorithms if not supported by device */
5177 if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5178 !priv->sec_attr.ptha_acc_num)
5179 continue;
5181 /*
5182 * Skip algorithms requiring message digests
5183 * if MD not supported by device.
5184 */
5185 if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5186 !priv->sec_attr.md_acc_num)
5187 continue;
5189 t_alg->caam.dev = dev;
5190 caam_aead_alg_init(t_alg);
5192 err = crypto_register_aead(&t_alg->aead);
5193 if (err) {
5194 dev_warn(dev, "%s alg registration failed: %d\n",
5195 t_alg->aead.base.cra_driver_name, err);
5196 continue;
5199 t_alg->registered = true;
5200 registered = true;
5202 if (registered)
5203 dev_info(dev, "algorithms registered in /proc/crypto\n");
5205 /* register hash algorithms the device supports */
5206 INIT_LIST_HEAD(&hash_list);
5208 /*
5209 * Skip registration of any hashing algorithms if MD block
5210 * is not present.
5211 */
5212 if (!priv->sec_attr.md_acc_num)
5213 return 0;
5215 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5216 struct caam_hash_alg *t_alg;
5217 struct caam_hash_template *alg = driver_hash + i;
5219 /* register hmac version */
5220 t_alg = caam_hash_alloc(dev, alg, true);
5221 if (IS_ERR(t_alg)) {
5222 err = PTR_ERR(t_alg);
5223 dev_warn(dev, "%s hash alg allocation failed: %d\n",
5224 alg->driver_name, err);
5225 continue;
5228 err = crypto_register_ahash(&t_alg->ahash_alg);
5229 if (err) {
5230 dev_warn(dev, "%s alg registration failed: %d\n",
5231 t_alg->ahash_alg.halg.base.cra_driver_name,
5232 err);
5233 kfree(t_alg);
5234 } else {
5235 list_add_tail(&t_alg->entry, &hash_list);
5238 /* register unkeyed version */
5239 t_alg = caam_hash_alloc(dev, alg, false);
5240 if (IS_ERR(t_alg)) {
5241 err = PTR_ERR(t_alg);
5242 dev_warn(dev, "%s alg allocation failed: %d\n",
5243 alg->driver_name, err);
5244 continue;
5247 err = crypto_register_ahash(&t_alg->ahash_alg);
5248 if (err) {
5249 dev_warn(dev, "%s alg registration failed: %d\n",
5250 t_alg->ahash_alg.halg.base.cra_driver_name,
5251 err);
5252 kfree(t_alg);
5253 } else {
5254 list_add_tail(&t_alg->entry, &hash_list);
5257 if (!list_empty(&hash_list))
5258 dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5260 return err;
5262 err_bind:
5263 dpaa2_dpseci_dpio_free(priv);
5264 err_dpio_setup:
5265 dpaa2_dpseci_free(priv);
5266 err_dpseci_setup:
5267 free_percpu(priv->ppriv);
5268 err_alloc_ppriv:
5269 fsl_mc_portal_free(priv->mc_io);
5270 err_dma_mask:
5271 kmem_cache_destroy(qi_cache);
5273 return err;
5276 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5278 struct device *dev;
5279 struct dpaa2_caam_priv *priv;
5280 int i;
5282 dev = &ls_dev->dev;
5283 priv = dev_get_drvdata(dev);
5285 dpaa2_dpseci_debugfs_exit(priv);
5287 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5288 struct caam_aead_alg *t_alg = driver_aeads + i;
5290 if (t_alg->registered)
5291 crypto_unregister_aead(&t_alg->aead);
5294 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5295 struct caam_skcipher_alg *t_alg = driver_algs + i;
5297 if (t_alg->registered)
5298 crypto_unregister_skcipher(&t_alg->skcipher);
5301 if (hash_list.next) {
5302 struct caam_hash_alg *t_hash_alg, *p;
5304 list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5305 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5306 list_del(&t_hash_alg->entry);
5307 kfree(t_hash_alg);
5311 dpaa2_dpseci_disable(priv);
5312 dpaa2_dpseci_dpio_free(priv);
5313 dpaa2_dpseci_free(priv);
5314 free_percpu(priv->ppriv);
5315 fsl_mc_portal_free(priv->mc_io);
5316 kmem_cache_destroy(qi_cache);
5318 return 0;
5321 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5323 struct dpaa2_fd fd;
5324 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5325 struct dpaa2_caam_priv_per_cpu *ppriv;
5326 int err = 0, i;
5328 if (IS_ERR(req))
5329 return PTR_ERR(req);
5331 if (priv->cscn_mem) {
5332 dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5333 DPAA2_CSCN_SIZE,
5334 DMA_FROM_DEVICE);
5335 if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5336 dev_dbg_ratelimited(dev, "Dropping request\n");
5337 return -EBUSY;
5341 dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5343 req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5344 DMA_BIDIRECTIONAL);
5345 if (dma_mapping_error(dev, req->fd_flt_dma)) {
5346 dev_err(dev, "DMA mapping error for QI enqueue request\n");
5347 goto err_out;
5350 memset(&fd, 0, sizeof(fd));
5351 dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5352 dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5353 dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5354 dpaa2_fd_set_flc(&fd, req->flc_dma);
5356 ppriv = this_cpu_ptr(priv->ppriv);
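/*
 * Bounded busy-wait below: retry the enqueue up to twice the number of
 * Tx queues while the QBMan portal keeps returning -EBUSY, with
 * cpu_relax() between attempts, then give up and unmap.
 */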
5357 for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5358 err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5359 &fd);
5360 if (err != -EBUSY)
5361 break;
5363 cpu_relax();
5366 if (unlikely(err)) {
5367 dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5368 goto err_out;
5371 return -EINPROGRESS;
5373 err_out:
5374 dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5375 DMA_BIDIRECTIONAL);
5376 return -EIO;
5378 EXPORT_SYMBOL(dpaa2_caam_enqueue);
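/*
 * Typical caller pattern (a condensed sketch of the ahash paths above;
 * error handling omitted, "op" standing in for the flow context index):
 *
 *	req_ctx->flc = &ctx->flc[op];
 *	req_ctx->flc_dma = ctx->flc_dma[op];
 *	req_ctx->cbk = done_callback;
 *	req_ctx->ctx = &req->base;
 *	req_ctx->edesc = edesc;
 *	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
 *	if (ret != -EINPROGRESS &&
 *	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
 *		goto unmap;
 *
 * -EINPROGRESS means the frame was accepted and cbk() will run from
 * NAPI context; -EBUSY is surfaced only to backlog-capable requests.
 */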
5380 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5382 .vendor = FSL_MC_VENDOR_FREESCALE,
5383 .obj_type = "dpseci",
5385 { .vendor = 0x0 }
5388 static struct fsl_mc_driver dpaa2_caam_driver = {
5389 .driver = {
5390 .name = KBUILD_MODNAME,
5391 .owner = THIS_MODULE,
5393 .probe = dpaa2_caam_probe,
5394 .remove = dpaa2_caam_remove,
5395 .match_id_table = dpaa2_caam_match_id_table
5398 MODULE_LICENSE("Dual BSD/GPL");
5399 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5400 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5402 module_fsl_mc_driver(dpaa2_caam_driver);