// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2019 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
#include <asm/unaligned.h>

#define CAAM_CRA_PRIORITY	2000
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)
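
/*
 * Illustrative sizing, assuming the usual kernel constants
 * (AES_MAX_KEY_SIZE = 32, CTR_RFC3686_NONCE_SIZE = 4, SHA512_DIGEST_SIZE =
 * 64): CAAM_MAX_KEY_SIZE works out to 32 + 4 + 2 * 64 = 164 bytes, i.e. the
 * largest AES key plus the RFC3686 nonce plus the largest (SHA-512) split
 * authentication key.
 */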

/*
 * This is a cache of buffers, from which the users of CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *	 being processed. This can be added by the dpaa2-eth driver. This would
 *	 pose a problem for userspace application processing which cannot
 *	 know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;

struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 * @xts_key_fallback: true if the XTS key length is not supported by the
 *		      accelerator and the fallback tfm must be used
 * @fallback: XTS fallback tfm
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};

static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}

/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kzalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}

/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
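
/*
 * Typical hotpath usage, as done by the *_edesc_alloc() helpers below
 * (illustrative sketch):
 *
 *	edesc = qi_cache_zalloc(GFP_DMA | flags);
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...fill in and enqueue...
 *	qi_cache_free(edesc);
 */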

static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx(container_of(areq, struct aead_request,
						     base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
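
/*
 * Illustrative note on the inline mask, assuming desc_inline_query() sets
 * bit i of inl_mask when data_len[i] still fits in the shared descriptor:
 * with data_len[0] = adata.keylen_pad and data_len[1] = cdata.keylen as
 * above, inl_mask == 3 inlines both keys as immediate data, while
 * inl_mask == 2 inlines only the encryption key and references the larger
 * split authentication key through its DMA address.
 */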

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
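
/*
 * Resulting ctx->key layout (illustrative, for hmac(sha256) where the padded
 * split key is 2 * SHA256_DIGEST_SIZE = 64 bytes, with a 16-byte AES key):
 *
 *	ctx->key: [ split authentication key : keylen_pad = 64 | enc key : 16 ]
 *
 * The split key itself is derived by the CAAM via the DKP protocol (the
 * device writes into ctx->key), which is why caam_cra_init() maps the key
 * DMA_BIDIRECTIONAL for algorithms that use DKP.
 */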

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto out;

	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
	      aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *	pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *	overlapping S/Gs; pad one of them
	 * else if (input S/G)
	 *	pad input S/G, if needed
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  1 + !!ivsize +
				  pad_sg_nents(mapped_src_nents));
	else
		qm_sg_nents = pad_sg_nents(qm_sg_nents);
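
	/*
	 * Worked example (illustrative), assuming pad_sg_nents() rounds up
	 * to a multiple of the 4 entries the HW reads at a time: for
	 * src != dst, ivsize = 16, mapped_src_nents = 2 and
	 * mapped_dst_nents = 3, qm_sg_nents = 1 + 1 + 2 + pad_sg_nents(3) =
	 * 4 + 4 = 8 entries.
	 */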

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (!mapped_dst_nents) {
		/*
		 * crypto engine requires the output entry to be present when
		 * "frame list" FD is used.
		 * Since engine does not support FMT=2'b11 (unused entry type),
		 * leaving out_fle zeroized is the best option.
		 */
		goto skip_out_fle;
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

skip_out_fle:
	return edesc;
}

static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen)
		return -EINVAL;

	ctx->cdata.key_virt = key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
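
/*
 * Illustrative note: rem_bytes is what remains of the 64-word descriptor
 * buffer once the job descriptor I/O section and the key have been accounted
 * for. When the fixed part of the GCM shared descriptor still fits
 * (rem_bytes >= DESC_QI_GCM_[ENC|DEC]_LEN), the key is inlined as immediate
 * data; otherwise the descriptor carries only the key's DMA address. The
 * rfc4106 and rfc4543 variants below apply the same rule.
 */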

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
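
/*
 * Worked example (illustrative): the rfc3686(ctr(aes)) key material is
 * {KEY, NONCE}, so a 20-byte input splits into a 16-byte AES key and a
 * 4-byte nonce (CTR_RFC3686_NONCE_SIZE). The IV is then loaded at
 * ctx1_iv_off = 16 + 4 = 20 bytes into CONTEXT1, right after the nonce.
 */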

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
				    const u8 *key, unsigned int keylen)
{
	if (keylen != CHACHA_KEY_SIZE)
		return -EINVAL;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(dev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);

	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	else
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));

	return edesc;
}
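
/*
 * Resulting HW S/G layout for req->src != req->dst (illustrative):
 *
 *	input table:  [ IV ][ src entries ... ]
 *	output table: [ dst entries ... ][ IV ]
 *
 * Both IV entries point at the same bidirectionally-mapped buffer, so the
 * engine reads the input IV from it and writes the output IV back to it,
 * from where skcipher_{en,de}crypt_done() copy it into req->iv.
 */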

static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
}

static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}
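
/*
 * Illustrative note: for xts(aes) the IV is 16 bytes, so this reports
 * whether the upper half (req->iv + 8) is non-zero, i.e. whether the IV
 * needs more than the 8 bytes supported in HW before era 9; see the
 * fallback decisions in skcipher_encrypt() and skcipher_decrypt().
 */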

static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_encrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_decrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

	return 0;
}
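
/*
 * Resulting single DMA mapping (illustrative):
 *
 *	dma_addr -> | flc[0] ... flc[NUM_OP - 1] | key |
 *
 * flc_dma[i] and key_dma are fixed offsets into this one mapping, which is
 * why its length is offsetof(struct caam_ctx, flc_dma) - everything in
 * struct caam_ctx up to the first member that is not mapped.
 */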

static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);
	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
	int ret = 0;

	if (alg_aai == OP_ALG_AAI_XTS) {
		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
		struct crypto_skcipher *fallback;

		fallback = crypto_alloc_skcipher(tfm_name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(fallback)) {
			dev_err(caam_alg->caam.dev,
				"Failed to allocate %s fallback: %ld\n",
				tfm_name, PTR_ERR(fallback));
			return PTR_ERR(fallback);
		}

		ctx->fallback = fallback;
		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
					    crypto_skcipher_reqsize(fallback));
	} else {
		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
	}

	ret = caam_cra_init(ctx, &caam_alg->caam, false);
	if (ret && ctx->fallback)
		crypto_free_skcipher(ctx->fallback);

	return ret;
}

static int caam_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);

	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
			     !caam_alg->caam.nodkp);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->fallback)
		crypto_free_skcipher(ctx->fallback);
	caam_exit_common(ctx);
}

static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi2",
				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "chacha20",
				.cra_driver_name = "chacha20-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = chacha20_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = CHACHA_KEY_SIZE,
			.max_keysize = CHACHA_KEY_SIZE,
			.ivsize = CHACHA_IV_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
	},
};

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
1940 .aead = {
1941 .base = {
1942 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1943 .cra_driver_name = "authenc-hmac-sha224-"
1944 "cbc-aes-caam-qi2",
1945 .cra_blocksize = AES_BLOCK_SIZE,
1947 .setkey = aead_setkey,
1948 .setauthsize = aead_setauthsize,
1949 .encrypt = aead_encrypt,
1950 .decrypt = aead_decrypt,
1951 .ivsize = AES_BLOCK_SIZE,
1952 .maxauthsize = SHA224_DIGEST_SIZE,
1954 .caam = {
1955 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1956 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1957 OP_ALG_AAI_HMAC_PRECOMP,
1961 .aead = {
1962 .base = {
1963 .cra_name = "echainiv(authenc(hmac(sha224),"
1964 "cbc(aes)))",
1965 .cra_driver_name = "echainiv-authenc-"
1966 "hmac-sha224-cbc-aes-caam-qi2",
1967 .cra_blocksize = AES_BLOCK_SIZE,
1969 .setkey = aead_setkey,
1970 .setauthsize = aead_setauthsize,
1971 .encrypt = aead_encrypt,
1972 .decrypt = aead_decrypt,
1973 .ivsize = AES_BLOCK_SIZE,
1974 .maxauthsize = SHA224_DIGEST_SIZE,
1976 .caam = {
1977 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1978 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1979 OP_ALG_AAI_HMAC_PRECOMP,
1980 .geniv = true,
1984 .aead = {
1985 .base = {
1986 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1987 .cra_driver_name = "authenc-hmac-sha256-"
1988 "cbc-aes-caam-qi2",
1989 .cra_blocksize = AES_BLOCK_SIZE,
1991 .setkey = aead_setkey,
1992 .setauthsize = aead_setauthsize,
1993 .encrypt = aead_encrypt,
1994 .decrypt = aead_decrypt,
1995 .ivsize = AES_BLOCK_SIZE,
1996 .maxauthsize = SHA256_DIGEST_SIZE,
1998 .caam = {
1999 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2000 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2001 OP_ALG_AAI_HMAC_PRECOMP,
2005 .aead = {
2006 .base = {
2007 .cra_name = "echainiv(authenc(hmac(sha256),"
2008 "cbc(aes)))",
2009 .cra_driver_name = "echainiv-authenc-"
2010 "hmac-sha256-cbc-aes-"
2011 "caam-qi2",
2012 .cra_blocksize = AES_BLOCK_SIZE,
2014 .setkey = aead_setkey,
2015 .setauthsize = aead_setauthsize,
2016 .encrypt = aead_encrypt,
2017 .decrypt = aead_decrypt,
2018 .ivsize = AES_BLOCK_SIZE,
2019 .maxauthsize = SHA256_DIGEST_SIZE,
2021 .caam = {
2022 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2023 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2024 OP_ALG_AAI_HMAC_PRECOMP,
2025 .geniv = true,
2029 .aead = {
2030 .base = {
2031 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2032 .cra_driver_name = "authenc-hmac-sha384-"
2033 "cbc-aes-caam-qi2",
2034 .cra_blocksize = AES_BLOCK_SIZE,
2036 .setkey = aead_setkey,
2037 .setauthsize = aead_setauthsize,
2038 .encrypt = aead_encrypt,
2039 .decrypt = aead_decrypt,
2040 .ivsize = AES_BLOCK_SIZE,
2041 .maxauthsize = SHA384_DIGEST_SIZE,
2043 .caam = {
2044 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2045 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2046 OP_ALG_AAI_HMAC_PRECOMP,
2050 .aead = {
2051 .base = {
2052 .cra_name = "echainiv(authenc(hmac(sha384),"
2053 "cbc(aes)))",
2054 .cra_driver_name = "echainiv-authenc-"
2055 "hmac-sha384-cbc-aes-"
2056 "caam-qi2",
2057 .cra_blocksize = AES_BLOCK_SIZE,
2059 .setkey = aead_setkey,
2060 .setauthsize = aead_setauthsize,
2061 .encrypt = aead_encrypt,
2062 .decrypt = aead_decrypt,
2063 .ivsize = AES_BLOCK_SIZE,
2064 .maxauthsize = SHA384_DIGEST_SIZE,
2066 .caam = {
2067 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2068 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2069 OP_ALG_AAI_HMAC_PRECOMP,
2070 .geniv = true,
2074 .aead = {
2075 .base = {
2076 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2077 .cra_driver_name = "authenc-hmac-sha512-"
2078 "cbc-aes-caam-qi2",
2079 .cra_blocksize = AES_BLOCK_SIZE,
2081 .setkey = aead_setkey,
2082 .setauthsize = aead_setauthsize,
2083 .encrypt = aead_encrypt,
2084 .decrypt = aead_decrypt,
2085 .ivsize = AES_BLOCK_SIZE,
2086 .maxauthsize = SHA512_DIGEST_SIZE,
2088 .caam = {
2089 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2090 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2091 OP_ALG_AAI_HMAC_PRECOMP,
2095 .aead = {
2096 .base = {
2097 .cra_name = "echainiv(authenc(hmac(sha512),"
2098 "cbc(aes)))",
2099 .cra_driver_name = "echainiv-authenc-"
2100 "hmac-sha512-cbc-aes-"
2101 "caam-qi2",
2102 .cra_blocksize = AES_BLOCK_SIZE,
2104 .setkey = aead_setkey,
2105 .setauthsize = aead_setauthsize,
2106 .encrypt = aead_encrypt,
2107 .decrypt = aead_decrypt,
2108 .ivsize = AES_BLOCK_SIZE,
2109 .maxauthsize = SHA512_DIGEST_SIZE,
2111 .caam = {
2112 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2113 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2114 OP_ALG_AAI_HMAC_PRECOMP,
2115 .geniv = true,
2119 .aead = {
2120 .base = {
2121 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2122 .cra_driver_name = "authenc-hmac-md5-"
2123 "cbc-des3_ede-caam-qi2",
2124 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2126 .setkey = des3_aead_setkey,
2127 .setauthsize = aead_setauthsize,
2128 .encrypt = aead_encrypt,
2129 .decrypt = aead_decrypt,
2130 .ivsize = DES3_EDE_BLOCK_SIZE,
2131 .maxauthsize = MD5_DIGEST_SIZE,
2133 .caam = {
2134 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2135 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2136 OP_ALG_AAI_HMAC_PRECOMP,
2140 .aead = {
2141 .base = {
2142 .cra_name = "echainiv(authenc(hmac(md5),"
2143 "cbc(des3_ede)))",
2144 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2145 "cbc-des3_ede-caam-qi2",
2146 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2148 .setkey = des3_aead_setkey,
2149 .setauthsize = aead_setauthsize,
2150 .encrypt = aead_encrypt,
2151 .decrypt = aead_decrypt,
2152 .ivsize = DES3_EDE_BLOCK_SIZE,
2153 .maxauthsize = MD5_DIGEST_SIZE,
2155 .caam = {
2156 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2157 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2158 OP_ALG_AAI_HMAC_PRECOMP,
2159 .geniv = true,
2163 .aead = {
2164 .base = {
2165 .cra_name = "authenc(hmac(sha1),"
2166 "cbc(des3_ede))",
2167 .cra_driver_name = "authenc-hmac-sha1-"
2168 "cbc-des3_ede-caam-qi2",
2169 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2171 .setkey = des3_aead_setkey,
2172 .setauthsize = aead_setauthsize,
2173 .encrypt = aead_encrypt,
2174 .decrypt = aead_decrypt,
2175 .ivsize = DES3_EDE_BLOCK_SIZE,
2176 .maxauthsize = SHA1_DIGEST_SIZE,
2178 .caam = {
2179 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2180 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2181 OP_ALG_AAI_HMAC_PRECOMP,
2185 .aead = {
2186 .base = {
2187 .cra_name = "echainiv(authenc(hmac(sha1),"
2188 "cbc(des3_ede)))",
2189 .cra_driver_name = "echainiv-authenc-"
2190 "hmac-sha1-"
2191 "cbc-des3_ede-caam-qi2",
2192 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2194 .setkey = des3_aead_setkey,
2195 .setauthsize = aead_setauthsize,
2196 .encrypt = aead_encrypt,
2197 .decrypt = aead_decrypt,
2198 .ivsize = DES3_EDE_BLOCK_SIZE,
2199 .maxauthsize = SHA1_DIGEST_SIZE,
2201 .caam = {
2202 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2203 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2204 OP_ALG_AAI_HMAC_PRECOMP,
2205 .geniv = true,
2209 .aead = {
2210 .base = {
2211 .cra_name = "authenc(hmac(sha224),"
2212 "cbc(des3_ede))",
2213 .cra_driver_name = "authenc-hmac-sha224-"
2214 "cbc-des3_ede-caam-qi2",
2215 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2217 .setkey = des3_aead_setkey,
2218 .setauthsize = aead_setauthsize,
2219 .encrypt = aead_encrypt,
2220 .decrypt = aead_decrypt,
2221 .ivsize = DES3_EDE_BLOCK_SIZE,
2222 .maxauthsize = SHA224_DIGEST_SIZE,
2224 .caam = {
2225 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2226 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2227 OP_ALG_AAI_HMAC_PRECOMP,
2231 .aead = {
2232 .base = {
2233 .cra_name = "echainiv(authenc(hmac(sha224),"
2234 "cbc(des3_ede)))",
2235 .cra_driver_name = "echainiv-authenc-"
2236 "hmac-sha224-"
2237 "cbc-des3_ede-caam-qi2",
2238 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2240 .setkey = des3_aead_setkey,
2241 .setauthsize = aead_setauthsize,
2242 .encrypt = aead_encrypt,
2243 .decrypt = aead_decrypt,
2244 .ivsize = DES3_EDE_BLOCK_SIZE,
2245 .maxauthsize = SHA224_DIGEST_SIZE,
2247 .caam = {
2248 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2249 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2250 OP_ALG_AAI_HMAC_PRECOMP,
2251 .geniv = true,
2255 .aead = {
2256 .base = {
2257 .cra_name = "authenc(hmac(sha256),"
2258 "cbc(des3_ede))",
2259 .cra_driver_name = "authenc-hmac-sha256-"
2260 "cbc-des3_ede-caam-qi2",
2261 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2263 .setkey = des3_aead_setkey,
2264 .setauthsize = aead_setauthsize,
2265 .encrypt = aead_encrypt,
2266 .decrypt = aead_decrypt,
2267 .ivsize = DES3_EDE_BLOCK_SIZE,
2268 .maxauthsize = SHA256_DIGEST_SIZE,
2270 .caam = {
2271 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2272 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2273 OP_ALG_AAI_HMAC_PRECOMP,
2277 .aead = {
2278 .base = {
2279 .cra_name = "echainiv(authenc(hmac(sha256),"
2280 "cbc(des3_ede)))",
2281 .cra_driver_name = "echainiv-authenc-"
2282 "hmac-sha256-"
2283 "cbc-des3_ede-caam-qi2",
2284 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2286 .setkey = des3_aead_setkey,
2287 .setauthsize = aead_setauthsize,
2288 .encrypt = aead_encrypt,
2289 .decrypt = aead_decrypt,
2290 .ivsize = DES3_EDE_BLOCK_SIZE,
2291 .maxauthsize = SHA256_DIGEST_SIZE,
2293 .caam = {
2294 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2295 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2296 OP_ALG_AAI_HMAC_PRECOMP,
2297 .geniv = true,
2301 .aead = {
2302 .base = {
2303 .cra_name = "authenc(hmac(sha384),"
2304 "cbc(des3_ede))",
2305 .cra_driver_name = "authenc-hmac-sha384-"
2306 "cbc-des3_ede-caam-qi2",
2307 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2309 .setkey = des3_aead_setkey,
2310 .setauthsize = aead_setauthsize,
2311 .encrypt = aead_encrypt,
2312 .decrypt = aead_decrypt,
2313 .ivsize = DES3_EDE_BLOCK_SIZE,
2314 .maxauthsize = SHA384_DIGEST_SIZE,
2316 .caam = {
2317 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2318 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2319 OP_ALG_AAI_HMAC_PRECOMP,
2323 .aead = {
2324 .base = {
2325 .cra_name = "echainiv(authenc(hmac(sha384),"
2326 "cbc(des3_ede)))",
2327 .cra_driver_name = "echainiv-authenc-"
2328 "hmac-sha384-"
2329 "cbc-des3_ede-caam-qi2",
2330 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2332 .setkey = des3_aead_setkey,
2333 .setauthsize = aead_setauthsize,
2334 .encrypt = aead_encrypt,
2335 .decrypt = aead_decrypt,
2336 .ivsize = DES3_EDE_BLOCK_SIZE,
2337 .maxauthsize = SHA384_DIGEST_SIZE,
2339 .caam = {
2340 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2341 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2342 OP_ALG_AAI_HMAC_PRECOMP,
2343 .geniv = true,
2347 .aead = {
2348 .base = {
2349 .cra_name = "authenc(hmac(sha512),"
2350 "cbc(des3_ede))",
2351 .cra_driver_name = "authenc-hmac-sha512-"
2352 "cbc-des3_ede-caam-qi2",
2353 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2355 .setkey = des3_aead_setkey,
2356 .setauthsize = aead_setauthsize,
2357 .encrypt = aead_encrypt,
2358 .decrypt = aead_decrypt,
2359 .ivsize = DES3_EDE_BLOCK_SIZE,
2360 .maxauthsize = SHA512_DIGEST_SIZE,
2362 .caam = {
2363 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2364 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2365 OP_ALG_AAI_HMAC_PRECOMP,
2369 .aead = {
2370 .base = {
2371 .cra_name = "echainiv(authenc(hmac(sha512),"
2372 "cbc(des3_ede)))",
2373 .cra_driver_name = "echainiv-authenc-"
2374 "hmac-sha512-"
2375 "cbc-des3_ede-caam-qi2",
2376 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2378 .setkey = des3_aead_setkey,
2379 .setauthsize = aead_setauthsize,
2380 .encrypt = aead_encrypt,
2381 .decrypt = aead_decrypt,
2382 .ivsize = DES3_EDE_BLOCK_SIZE,
2383 .maxauthsize = SHA512_DIGEST_SIZE,
2385 .caam = {
2386 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2387 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2388 OP_ALG_AAI_HMAC_PRECOMP,
2389 .geniv = true,
2393 .aead = {
2394 .base = {
2395 .cra_name = "authenc(hmac(md5),cbc(des))",
2396 .cra_driver_name = "authenc-hmac-md5-"
2397 "cbc-des-caam-qi2",
2398 .cra_blocksize = DES_BLOCK_SIZE,
2400 .setkey = aead_setkey,
2401 .setauthsize = aead_setauthsize,
2402 .encrypt = aead_encrypt,
2403 .decrypt = aead_decrypt,
2404 .ivsize = DES_BLOCK_SIZE,
2405 .maxauthsize = MD5_DIGEST_SIZE,
2407 .caam = {
2408 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2409 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2410 OP_ALG_AAI_HMAC_PRECOMP,
2414 .aead = {
2415 .base = {
2416 .cra_name = "echainiv(authenc(hmac(md5),"
2417 "cbc(des)))",
2418 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2419 "cbc-des-caam-qi2",
2420 .cra_blocksize = DES_BLOCK_SIZE,
2422 .setkey = aead_setkey,
2423 .setauthsize = aead_setauthsize,
2424 .encrypt = aead_encrypt,
2425 .decrypt = aead_decrypt,
2426 .ivsize = DES_BLOCK_SIZE,
2427 .maxauthsize = MD5_DIGEST_SIZE,
2429 .caam = {
2430 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2431 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2432 OP_ALG_AAI_HMAC_PRECOMP,
2433 .geniv = true,
2437 .aead = {
2438 .base = {
2439 .cra_name = "authenc(hmac(sha1),cbc(des))",
2440 .cra_driver_name = "authenc-hmac-sha1-"
2441 "cbc-des-caam-qi2",
2442 .cra_blocksize = DES_BLOCK_SIZE,
2444 .setkey = aead_setkey,
2445 .setauthsize = aead_setauthsize,
2446 .encrypt = aead_encrypt,
2447 .decrypt = aead_decrypt,
2448 .ivsize = DES_BLOCK_SIZE,
2449 .maxauthsize = SHA1_DIGEST_SIZE,
2451 .caam = {
2452 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2453 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2454 OP_ALG_AAI_HMAC_PRECOMP,
2458 .aead = {
2459 .base = {
2460 .cra_name = "echainiv(authenc(hmac(sha1),"
2461 "cbc(des)))",
2462 .cra_driver_name = "echainiv-authenc-"
2463 "hmac-sha1-cbc-des-caam-qi2",
2464 .cra_blocksize = DES_BLOCK_SIZE,
2466 .setkey = aead_setkey,
2467 .setauthsize = aead_setauthsize,
2468 .encrypt = aead_encrypt,
2469 .decrypt = aead_decrypt,
2470 .ivsize = DES_BLOCK_SIZE,
2471 .maxauthsize = SHA1_DIGEST_SIZE,
2473 .caam = {
2474 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2475 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2476 OP_ALG_AAI_HMAC_PRECOMP,
2477 .geniv = true,
2481 .aead = {
2482 .base = {
2483 .cra_name = "authenc(hmac(sha224),cbc(des))",
2484 .cra_driver_name = "authenc-hmac-sha224-"
2485 "cbc-des-caam-qi2",
2486 .cra_blocksize = DES_BLOCK_SIZE,
2488 .setkey = aead_setkey,
2489 .setauthsize = aead_setauthsize,
2490 .encrypt = aead_encrypt,
2491 .decrypt = aead_decrypt,
2492 .ivsize = DES_BLOCK_SIZE,
2493 .maxauthsize = SHA224_DIGEST_SIZE,
2495 .caam = {
2496 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2497 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2498 OP_ALG_AAI_HMAC_PRECOMP,
2502 .aead = {
2503 .base = {
2504 .cra_name = "echainiv(authenc(hmac(sha224),"
2505 "cbc(des)))",
2506 .cra_driver_name = "echainiv-authenc-"
2507 "hmac-sha224-cbc-des-"
2508 "caam-qi2",
2509 .cra_blocksize = DES_BLOCK_SIZE,
2511 .setkey = aead_setkey,
2512 .setauthsize = aead_setauthsize,
2513 .encrypt = aead_encrypt,
2514 .decrypt = aead_decrypt,
2515 .ivsize = DES_BLOCK_SIZE,
2516 .maxauthsize = SHA224_DIGEST_SIZE,
2518 .caam = {
2519 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2520 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2521 OP_ALG_AAI_HMAC_PRECOMP,
2522 .geniv = true,
2526 .aead = {
2527 .base = {
2528 .cra_name = "authenc(hmac(sha256),cbc(des))",
2529 .cra_driver_name = "authenc-hmac-sha256-"
2530 "cbc-des-caam-qi2",
2531 .cra_blocksize = DES_BLOCK_SIZE,
2533 .setkey = aead_setkey,
2534 .setauthsize = aead_setauthsize,
2535 .encrypt = aead_encrypt,
2536 .decrypt = aead_decrypt,
2537 .ivsize = DES_BLOCK_SIZE,
2538 .maxauthsize = SHA256_DIGEST_SIZE,
2540 .caam = {
2541 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2542 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2543 OP_ALG_AAI_HMAC_PRECOMP,
2547 .aead = {
2548 .base = {
2549 .cra_name = "echainiv(authenc(hmac(sha256),"
2550 "cbc(des)))",
2551 .cra_driver_name = "echainiv-authenc-"
2552 "hmac-sha256-cbc-des-"
2553 "caam-qi2",
2554 .cra_blocksize = DES_BLOCK_SIZE,
2556 .setkey = aead_setkey,
2557 .setauthsize = aead_setauthsize,
2558 .encrypt = aead_encrypt,
2559 .decrypt = aead_decrypt,
2560 .ivsize = DES_BLOCK_SIZE,
2561 .maxauthsize = SHA256_DIGEST_SIZE,
2563 .caam = {
2564 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2565 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2566 OP_ALG_AAI_HMAC_PRECOMP,
2567 .geniv = true,
2571 .aead = {
2572 .base = {
2573 .cra_name = "authenc(hmac(sha384),cbc(des))",
2574 .cra_driver_name = "authenc-hmac-sha384-"
2575 "cbc-des-caam-qi2",
2576 .cra_blocksize = DES_BLOCK_SIZE,
2578 .setkey = aead_setkey,
2579 .setauthsize = aead_setauthsize,
2580 .encrypt = aead_encrypt,
2581 .decrypt = aead_decrypt,
2582 .ivsize = DES_BLOCK_SIZE,
2583 .maxauthsize = SHA384_DIGEST_SIZE,
2585 .caam = {
2586 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2587 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2588 OP_ALG_AAI_HMAC_PRECOMP,
2592 .aead = {
2593 .base = {
2594 .cra_name = "echainiv(authenc(hmac(sha384),"
2595 "cbc(des)))",
2596 .cra_driver_name = "echainiv-authenc-"
2597 "hmac-sha384-cbc-des-"
2598 "caam-qi2",
2599 .cra_blocksize = DES_BLOCK_SIZE,
2601 .setkey = aead_setkey,
2602 .setauthsize = aead_setauthsize,
2603 .encrypt = aead_encrypt,
2604 .decrypt = aead_decrypt,
2605 .ivsize = DES_BLOCK_SIZE,
2606 .maxauthsize = SHA384_DIGEST_SIZE,
2608 .caam = {
2609 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2610 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2611 OP_ALG_AAI_HMAC_PRECOMP,
2612 .geniv = true,
2616 .aead = {
2617 .base = {
2618 .cra_name = "authenc(hmac(sha512),cbc(des))",
2619 .cra_driver_name = "authenc-hmac-sha512-"
2620 "cbc-des-caam-qi2",
2621 .cra_blocksize = DES_BLOCK_SIZE,
2623 .setkey = aead_setkey,
2624 .setauthsize = aead_setauthsize,
2625 .encrypt = aead_encrypt,
2626 .decrypt = aead_decrypt,
2627 .ivsize = DES_BLOCK_SIZE,
2628 .maxauthsize = SHA512_DIGEST_SIZE,
2630 .caam = {
2631 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2632 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2633 OP_ALG_AAI_HMAC_PRECOMP,
2637 .aead = {
2638 .base = {
2639 .cra_name = "echainiv(authenc(hmac(sha512),"
2640 "cbc(des)))",
2641 .cra_driver_name = "echainiv-authenc-"
2642 "hmac-sha512-cbc-des-"
2643 "caam-qi2",
2644 .cra_blocksize = DES_BLOCK_SIZE,
2646 .setkey = aead_setkey,
2647 .setauthsize = aead_setauthsize,
2648 .encrypt = aead_encrypt,
2649 .decrypt = aead_decrypt,
2650 .ivsize = DES_BLOCK_SIZE,
2651 .maxauthsize = SHA512_DIGEST_SIZE,
2653 .caam = {
2654 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2655 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2656 OP_ALG_AAI_HMAC_PRECOMP,
2657 .geniv = true,
2661 .aead = {
2662 .base = {
2663 .cra_name = "authenc(hmac(md5),"
2664 "rfc3686(ctr(aes)))",
2665 .cra_driver_name = "authenc-hmac-md5-"
2666 "rfc3686-ctr-aes-caam-qi2",
2667 .cra_blocksize = 1,
2669 .setkey = aead_setkey,
2670 .setauthsize = aead_setauthsize,
2671 .encrypt = aead_encrypt,
2672 .decrypt = aead_decrypt,
2673 .ivsize = CTR_RFC3686_IV_SIZE,
2674 .maxauthsize = MD5_DIGEST_SIZE,
2676 .caam = {
2677 .class1_alg_type = OP_ALG_ALGSEL_AES |
2678 OP_ALG_AAI_CTR_MOD128,
2679 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2680 OP_ALG_AAI_HMAC_PRECOMP,
2681 .rfc3686 = true,
2685 .aead = {
2686 .base = {
2687 .cra_name = "seqiv(authenc("
2688 "hmac(md5),rfc3686(ctr(aes))))",
2689 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2690 "rfc3686-ctr-aes-caam-qi2",
2691 .cra_blocksize = 1,
2693 .setkey = aead_setkey,
2694 .setauthsize = aead_setauthsize,
2695 .encrypt = aead_encrypt,
2696 .decrypt = aead_decrypt,
2697 .ivsize = CTR_RFC3686_IV_SIZE,
2698 .maxauthsize = MD5_DIGEST_SIZE,
2700 .caam = {
2701 .class1_alg_type = OP_ALG_ALGSEL_AES |
2702 OP_ALG_AAI_CTR_MOD128,
2703 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2704 OP_ALG_AAI_HMAC_PRECOMP,
2705 .rfc3686 = true,
2706 .geniv = true,
2710 .aead = {
2711 .base = {
2712 .cra_name = "authenc(hmac(sha1),"
2713 "rfc3686(ctr(aes)))",
2714 .cra_driver_name = "authenc-hmac-sha1-"
2715 "rfc3686-ctr-aes-caam-qi2",
2716 .cra_blocksize = 1,
2718 .setkey = aead_setkey,
2719 .setauthsize = aead_setauthsize,
2720 .encrypt = aead_encrypt,
2721 .decrypt = aead_decrypt,
2722 .ivsize = CTR_RFC3686_IV_SIZE,
2723 .maxauthsize = SHA1_DIGEST_SIZE,
2725 .caam = {
2726 .class1_alg_type = OP_ALG_ALGSEL_AES |
2727 OP_ALG_AAI_CTR_MOD128,
2728 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2729 OP_ALG_AAI_HMAC_PRECOMP,
2730 .rfc3686 = true,
2734 .aead = {
2735 .base = {
2736 .cra_name = "seqiv(authenc("
2737 "hmac(sha1),rfc3686(ctr(aes))))",
2738 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2739 "rfc3686-ctr-aes-caam-qi2",
2740 .cra_blocksize = 1,
2742 .setkey = aead_setkey,
2743 .setauthsize = aead_setauthsize,
2744 .encrypt = aead_encrypt,
2745 .decrypt = aead_decrypt,
2746 .ivsize = CTR_RFC3686_IV_SIZE,
2747 .maxauthsize = SHA1_DIGEST_SIZE,
2749 .caam = {
2750 .class1_alg_type = OP_ALG_ALGSEL_AES |
2751 OP_ALG_AAI_CTR_MOD128,
2752 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2753 OP_ALG_AAI_HMAC_PRECOMP,
2754 .rfc3686 = true,
2755 .geniv = true,
2759 .aead = {
2760 .base = {
2761 .cra_name = "authenc(hmac(sha224),"
2762 "rfc3686(ctr(aes)))",
2763 .cra_driver_name = "authenc-hmac-sha224-"
2764 "rfc3686-ctr-aes-caam-qi2",
2765 .cra_blocksize = 1,
2767 .setkey = aead_setkey,
2768 .setauthsize = aead_setauthsize,
2769 .encrypt = aead_encrypt,
2770 .decrypt = aead_decrypt,
2771 .ivsize = CTR_RFC3686_IV_SIZE,
2772 .maxauthsize = SHA224_DIGEST_SIZE,
2774 .caam = {
2775 .class1_alg_type = OP_ALG_ALGSEL_AES |
2776 OP_ALG_AAI_CTR_MOD128,
2777 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2778 OP_ALG_AAI_HMAC_PRECOMP,
2779 .rfc3686 = true,
2783 .aead = {
2784 .base = {
2785 .cra_name = "seqiv(authenc("
2786 "hmac(sha224),rfc3686(ctr(aes))))",
2787 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2788 "rfc3686-ctr-aes-caam-qi2",
2789 .cra_blocksize = 1,
2791 .setkey = aead_setkey,
2792 .setauthsize = aead_setauthsize,
2793 .encrypt = aead_encrypt,
2794 .decrypt = aead_decrypt,
2795 .ivsize = CTR_RFC3686_IV_SIZE,
2796 .maxauthsize = SHA224_DIGEST_SIZE,
2798 .caam = {
2799 .class1_alg_type = OP_ALG_ALGSEL_AES |
2800 OP_ALG_AAI_CTR_MOD128,
2801 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2802 OP_ALG_AAI_HMAC_PRECOMP,
2803 .rfc3686 = true,
2804 .geniv = true,
2808 .aead = {
2809 .base = {
2810 .cra_name = "authenc(hmac(sha256),"
2811 "rfc3686(ctr(aes)))",
2812 .cra_driver_name = "authenc-hmac-sha256-"
2813 "rfc3686-ctr-aes-caam-qi2",
2814 .cra_blocksize = 1,
2816 .setkey = aead_setkey,
2817 .setauthsize = aead_setauthsize,
2818 .encrypt = aead_encrypt,
2819 .decrypt = aead_decrypt,
2820 .ivsize = CTR_RFC3686_IV_SIZE,
2821 .maxauthsize = SHA256_DIGEST_SIZE,
2823 .caam = {
2824 .class1_alg_type = OP_ALG_ALGSEL_AES |
2825 OP_ALG_AAI_CTR_MOD128,
2826 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2827 OP_ALG_AAI_HMAC_PRECOMP,
2828 .rfc3686 = true,
2832 .aead = {
2833 .base = {
2834 .cra_name = "seqiv(authenc(hmac(sha256),"
2835 "rfc3686(ctr(aes))))",
2836 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2837 "rfc3686-ctr-aes-caam-qi2",
2838 .cra_blocksize = 1,
2840 .setkey = aead_setkey,
2841 .setauthsize = aead_setauthsize,
2842 .encrypt = aead_encrypt,
2843 .decrypt = aead_decrypt,
2844 .ivsize = CTR_RFC3686_IV_SIZE,
2845 .maxauthsize = SHA256_DIGEST_SIZE,
2847 .caam = {
2848 .class1_alg_type = OP_ALG_ALGSEL_AES |
2849 OP_ALG_AAI_CTR_MOD128,
2850 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2851 OP_ALG_AAI_HMAC_PRECOMP,
2852 .rfc3686 = true,
2853 .geniv = true,
2857 .aead = {
2858 .base = {
2859 .cra_name = "authenc(hmac(sha384),"
2860 "rfc3686(ctr(aes)))",
2861 .cra_driver_name = "authenc-hmac-sha384-"
2862 "rfc3686-ctr-aes-caam-qi2",
2863 .cra_blocksize = 1,
2865 .setkey = aead_setkey,
2866 .setauthsize = aead_setauthsize,
2867 .encrypt = aead_encrypt,
2868 .decrypt = aead_decrypt,
2869 .ivsize = CTR_RFC3686_IV_SIZE,
2870 .maxauthsize = SHA384_DIGEST_SIZE,
2872 .caam = {
2873 .class1_alg_type = OP_ALG_ALGSEL_AES |
2874 OP_ALG_AAI_CTR_MOD128,
2875 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2876 OP_ALG_AAI_HMAC_PRECOMP,
2877 .rfc3686 = true,
2881 .aead = {
2882 .base = {
2883 .cra_name = "seqiv(authenc(hmac(sha384),"
2884 "rfc3686(ctr(aes))))",
2885 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2886 "rfc3686-ctr-aes-caam-qi2",
2887 .cra_blocksize = 1,
2889 .setkey = aead_setkey,
2890 .setauthsize = aead_setauthsize,
2891 .encrypt = aead_encrypt,
2892 .decrypt = aead_decrypt,
2893 .ivsize = CTR_RFC3686_IV_SIZE,
2894 .maxauthsize = SHA384_DIGEST_SIZE,
2896 .caam = {
2897 .class1_alg_type = OP_ALG_ALGSEL_AES |
2898 OP_ALG_AAI_CTR_MOD128,
2899 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2900 OP_ALG_AAI_HMAC_PRECOMP,
2901 .rfc3686 = true,
2902 .geniv = true,
2906 .aead = {
2907 .base = {
2908 .cra_name = "rfc7539(chacha20,poly1305)",
2909 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2910 "caam-qi2",
2911 .cra_blocksize = 1,
2913 .setkey = chachapoly_setkey,
2914 .setauthsize = chachapoly_setauthsize,
2915 .encrypt = aead_encrypt,
2916 .decrypt = aead_decrypt,
2917 .ivsize = CHACHAPOLY_IV_SIZE,
2918 .maxauthsize = POLY1305_DIGEST_SIZE,
2920 .caam = {
2921 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2922 OP_ALG_AAI_AEAD,
2923 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2924 OP_ALG_AAI_AEAD,
2925 .nodkp = true,
2929 .aead = {
2930 .base = {
2931 .cra_name = "rfc7539esp(chacha20,poly1305)",
2932 .cra_driver_name = "rfc7539esp-chacha20-"
2933 "poly1305-caam-qi2",
2934 .cra_blocksize = 1,
2936 .setkey = chachapoly_setkey,
2937 .setauthsize = chachapoly_setauthsize,
2938 .encrypt = aead_encrypt,
2939 .decrypt = aead_decrypt,
2940 .ivsize = 8,
2941 .maxauthsize = POLY1305_DIGEST_SIZE,
2943 .caam = {
2944 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2945 OP_ALG_AAI_AEAD,
2946 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2947 OP_ALG_AAI_AEAD,
2948 .nodkp = true,
2952 .aead = {
2953 .base = {
2954 .cra_name = "authenc(hmac(sha512),"
2955 "rfc3686(ctr(aes)))",
2956 .cra_driver_name = "authenc-hmac-sha512-"
2957 "rfc3686-ctr-aes-caam-qi2",
2958 .cra_blocksize = 1,
2960 .setkey = aead_setkey,
2961 .setauthsize = aead_setauthsize,
2962 .encrypt = aead_encrypt,
2963 .decrypt = aead_decrypt,
2964 .ivsize = CTR_RFC3686_IV_SIZE,
2965 .maxauthsize = SHA512_DIGEST_SIZE,
2967 .caam = {
2968 .class1_alg_type = OP_ALG_ALGSEL_AES |
2969 OP_ALG_AAI_CTR_MOD128,
2970 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2971 OP_ALG_AAI_HMAC_PRECOMP,
2972 .rfc3686 = true,
2976 .aead = {
2977 .base = {
2978 .cra_name = "seqiv(authenc(hmac(sha512),"
2979 "rfc3686(ctr(aes))))",
2980 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2981 "rfc3686-ctr-aes-caam-qi2",
2982 .cra_blocksize = 1,
2984 .setkey = aead_setkey,
2985 .setauthsize = aead_setauthsize,
2986 .encrypt = aead_encrypt,
2987 .decrypt = aead_decrypt,
2988 .ivsize = CTR_RFC3686_IV_SIZE,
2989 .maxauthsize = SHA512_DIGEST_SIZE,
2991 .caam = {
2992 .class1_alg_type = OP_ALG_ALGSEL_AES |
2993 OP_ALG_AAI_CTR_MOD128,
2994 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2995 OP_ALG_AAI_HMAC_PRECOMP,
2996 .rfc3686 = true,
2997 .geniv = true,
2998 },
2999 },
3000 };
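/*
 * Each driver_aeads entry pairs a crypto API template (cra_name) with
 * the CAAM options used to service it: class1_alg_type selects the
 * cipher, class2_alg_type the authentication algorithm. The flags
 * mirror the template: rfc3686 entries add the CTR nonce handling,
 * geniv marks the seqiv()/echainiv() variants whose IV is generated
 * inside the descriptor, and nodkp marks single-key algorithms (GCM,
 * ChaCha20-Poly1305) that need no split-key (DKP) derivation.
 */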
3002 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3004 struct skcipher_alg *alg = &t_alg->skcipher;
3006 alg->base.cra_module = THIS_MODULE;
3007 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3008 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3009 alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3010 CRYPTO_ALG_KERN_DRIVER_ONLY);
3012 alg->init = caam_cra_init_skcipher;
3013 alg->exit = caam_cra_exit;
3016 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3018 struct aead_alg *alg = &t_alg->aead;
3020 alg->base.cra_module = THIS_MODULE;
3021 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3022 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3023 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3024 CRYPTO_ALG_KERN_DRIVER_ONLY;
3026 alg->init = caam_cra_init_aead;
3027 alg->exit = caam_cra_exit_aead;
3028 }
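/*
 * A minimal sketch (assumptions: the standard crypto_register_aead()
 * API; the driver's actual probe-time loop may differ) of how the
 * template table and the init helper above fit together:
 *
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
 *		struct caam_aead_alg *t_alg = driver_aeads + i;
 *
 *		caam_aead_alg_init(t_alg);
 *		if (!crypto_register_aead(&t_alg->aead))
 *			t_alg->registered = true;
 *	}
 */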
3030 /* max hash key is max split key size */
3031 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
3033 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
3035 /* CAAM context sizes for hashes: running digest + 8-byte message length */
3036 #define HASH_MSG_LEN 8
3037 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
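/*
 * Example: for sha256 the running context is HASH_MSG_LEN +
 * SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes; MAX_CTX_LEN is sized for the
 * largest digest (sha512: 8 + 64 = 72 bytes).
 */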
3039 enum hash_optype {
3040 UPDATE = 0,
3041 UPDATE_FIRST,
3042 FINALIZE,
3043 DIGEST,
3044 HASH_NUM_OP
3045 };
3047 /**
3048 * struct caam_hash_ctx - ahash per-session context
3049 * @flc: Flow Contexts array
3050 * @key: authentication key
3051 * @flc_dma: I/O virtual addresses of the Flow Contexts
3052 * @dev: dpseci device
3053 * @ctx_len: size of Context Register
3054 * @adata: hashing algorithm details
3055 */
3056 struct caam_hash_ctx {
3057 struct caam_flc flc[HASH_NUM_OP];
3058 u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3059 dma_addr_t flc_dma[HASH_NUM_OP];
3060 struct device *dev;
3061 int ctx_len;
3062 struct alginfo adata;
3065 /* ahash state */
3066 struct caam_hash_state {
3067 struct caam_request caam_req;
3068 dma_addr_t buf_dma;
3069 dma_addr_t ctx_dma;
3070 int ctx_dma_len;
3071 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3072 int buflen;
3073 int next_buflen;
3074 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3075 int (*update)(struct ahash_request *req);
3076 int (*final)(struct ahash_request *req);
3077 int (*finup)(struct ahash_request *req);
3080 struct caam_export_state {
3081 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3082 u8 caam_ctx[MAX_CTX_LEN];
3083 int buflen;
3084 int (*update)(struct ahash_request *req);
3085 int (*final)(struct ahash_request *req);
3086 int (*finup)(struct ahash_request *req);
3087 };
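/*
 * Illustrative sketch of how an export callback could snapshot the
 * request state into caam_export_state (hypothetical ahash_export(),
 * modeled on other caam drivers):
 *
 *	struct caam_export_state *export = out;
 *	struct caam_hash_state *state = ahash_request_ctx(req);
 *
 *	memcpy(export->buf, state->buf, state->buflen);
 *	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
 *	export->buflen = state->buflen;
 *	export->update = state->update;
 *	export->final = state->final;
 *	export->finup = state->finup;
 */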
3089 /* Map current buffer in state (if length > 0) and put it in link table */
3090 static inline int buf_map_to_qm_sg(struct device *dev,
3091 struct dpaa2_sg_entry *qm_sg,
3092 struct caam_hash_state *state)
3094 int buflen = state->buflen;
3096 if (!buflen)
3097 return 0;
3099 state->buf_dma = dma_map_single(dev, state->buf, buflen,
3100 DMA_TO_DEVICE);
3101 if (dma_mapping_error(dev, state->buf_dma)) {
3102 dev_err(dev, "unable to map buf\n");
3103 state->buf_dma = 0;
3104 return -ENOMEM;
3107 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3109 return 0;
3112 /* Map state->caam_ctx and add it to the link table */
3113 static inline int ctx_map_to_qm_sg(struct device *dev,
3114 struct caam_hash_state *state, int ctx_len,
3115 struct dpaa2_sg_entry *qm_sg, u32 flag)
3117 state->ctx_dma_len = ctx_len;
3118 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3119 if (dma_mapping_error(dev, state->ctx_dma)) {
3120 dev_err(dev, "unable to map ctx\n");
3121 state->ctx_dma = 0;
3122 return -ENOMEM;
3125 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3127 return 0;
3128 }
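/*
 * The two helpers above give the context-carrying paths below (e.g.
 * ahash_update_ctx()) a fixed input link-table layout: entry 0 is the
 * running context (ctx_map_to_qm_sg), entry 1 the buffered partial
 * block when one exists (buf_map_to_qm_sg), and any remaining entries
 * come from the request scatterlist.
 */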
3130 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3132 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3133 int digestsize = crypto_ahash_digestsize(ahash);
3134 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3135 struct caam_flc *flc;
3136 u32 *desc;
3138 /* ahash_update shared descriptor */
3139 flc = &ctx->flc[UPDATE];
3140 desc = flc->sh_desc;
3141 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3142 ctx->ctx_len, true, priv->sec_attr.era);
3143 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3144 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3145 desc_bytes(desc), DMA_BIDIRECTIONAL);
3146 print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3147 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3148 1);
3150 /* ahash_update_first shared descriptor */
3151 flc = &ctx->flc[UPDATE_FIRST];
3152 desc = flc->sh_desc;
3153 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3154 ctx->ctx_len, false, priv->sec_attr.era);
3155 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3156 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3157 desc_bytes(desc), DMA_BIDIRECTIONAL);
3158 print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3159 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3160 1);
3162 /* ahash_final shared descriptor */
3163 flc = &ctx->flc[FINALIZE];
3164 desc = flc->sh_desc;
3165 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3166 ctx->ctx_len, true, priv->sec_attr.era);
3167 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3168 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3169 desc_bytes(desc), DMA_BIDIRECTIONAL);
3170 print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3171 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3172 1);
3174 /* ahash_digest shared descriptor */
3175 flc = &ctx->flc[DIGEST];
3176 desc = flc->sh_desc;
3177 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3178 ctx->ctx_len, false, priv->sec_attr.era);
3179 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3180 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3181 desc_bytes(desc), DMA_BIDIRECTIONAL);
3182 print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3183 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3184 1);
3186 return 0;
3187 }
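/*
 * The four shared descriptors correspond to the ahash entry points:
 * UPDATE_FIRST (OP_ALG_AS_INIT) covers the first update of a stream,
 * UPDATE (OP_ALG_AS_UPDATE) the following ones, FINALIZE
 * (OP_ALG_AS_FINALIZE) serves final()/finup(), and DIGEST
 * (OP_ALG_AS_INITFINAL) handles one-shot digest() requests.
 */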
3189 struct split_key_sh_result {
3190 struct completion completion;
3191 int err;
3192 struct device *dev;
3195 static void split_key_sh_done(void *cbk_ctx, u32 err)
3197 struct split_key_sh_result *res = cbk_ctx;
3199 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3201 res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3202 complete(&res->completion);
3205 /* Digest the key if it is longer than the algorithm's block size */
3206 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3207 u32 digestsize)
3209 struct caam_request *req_ctx;
3210 u32 *desc;
3211 struct split_key_sh_result result;
3212 dma_addr_t key_dma;
3213 struct caam_flc *flc;
3214 dma_addr_t flc_dma;
3215 int ret = -ENOMEM;
3216 struct dpaa2_fl_entry *in_fle, *out_fle;
3218 req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3219 if (!req_ctx)
3220 return -ENOMEM;
3222 in_fle = &req_ctx->fd_flt[1];
3223 out_fle = &req_ctx->fd_flt[0];
3225 flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3226 if (!flc)
3227 goto err_flc;
3229 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3230 if (dma_mapping_error(ctx->dev, key_dma)) {
3231 dev_err(ctx->dev, "unable to map key memory\n");
3232 goto err_key_dma;
3235 desc = flc->sh_desc;
3237 init_sh_desc(desc, 0);
3239 /* descriptor to perform unkeyed hash on key_in */
3240 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3241 OP_ALG_AS_INITFINAL);
3242 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3243 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3244 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3245 LDST_SRCDST_BYTE_CONTEXT);
3247 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3248 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3249 desc_bytes(desc), DMA_TO_DEVICE);
3250 if (dma_mapping_error(ctx->dev, flc_dma)) {
3251 dev_err(ctx->dev, "unable to map shared descriptor\n");
3252 goto err_flc_dma;
3255 dpaa2_fl_set_final(in_fle, true);
3256 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3257 dpaa2_fl_set_addr(in_fle, key_dma);
3258 dpaa2_fl_set_len(in_fle, *keylen);
3259 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3260 dpaa2_fl_set_addr(out_fle, key_dma);
3261 dpaa2_fl_set_len(out_fle, digestsize);
3263 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3264 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3265 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3266 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3267 1);
3269 result.err = 0;
3270 init_completion(&result.completion);
3271 result.dev = ctx->dev;
3273 req_ctx->flc = flc;
3274 req_ctx->flc_dma = flc_dma;
3275 req_ctx->cbk = split_key_sh_done;
3276 req_ctx->ctx = &result;
3278 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3279 if (ret == -EINPROGRESS) {
3280 /* request is in flight; wait for completion */
3281 wait_for_completion(&result.completion);
3282 ret = result.err;
3283 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3284 DUMP_PREFIX_ADDRESS, 16, 4, key,
3285 digestsize, 1);
3288 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3289 DMA_TO_DEVICE);
3290 err_flc_dma:
3291 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3292 err_key_dma:
3293 kfree(flc);
3294 err_flc:
3295 kfree(req_ctx);
3297 *keylen = digestsize;
3299 return ret;
3302 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3303 unsigned int keylen)
3305 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3306 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3307 unsigned int digestsize = crypto_ahash_digestsize(ahash);
3308 int ret;
3309 u8 *hashed_key = NULL;
3311 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3313 if (keylen > blocksize) {
3314 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3315 if (!hashed_key)
3316 return -ENOMEM;
3317 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3318 if (ret)
3319 goto bad_free_key;
3320 key = hashed_key;
3323 ctx->adata.keylen = keylen;
3324 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3325 OP_ALG_ALGSEL_MASK);
3326 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3327 goto bad_free_key;
3329 ctx->adata.key_virt = key;
3330 ctx->adata.key_inline = true;
3332 /*
3333 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3334 * in invalid opcodes (last bytes of user key) in the resulting
3335 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3336 * addresses are needed.
3337 */
3338 if (keylen > ctx->adata.keylen_pad) {
3339 memcpy(ctx->key, key, keylen);
3340 dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3341 ctx->adata.keylen_pad,
3342 DMA_TO_DEVICE);
3345 ret = ahash_set_sh_desc(ahash);
3346 kfree(hashed_key);
3347 return ret;
3348 bad_free_key:
3349 kfree(hashed_key);
3350 return -EINVAL;
3351 }
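/*
 * Worked example: for hmac(sha256), blocksize is 64 bytes, so a
 * 100-byte user key takes the keylen > blocksize path above and
 * hash_digest_key() first reduces it to the 32-byte digest before the
 * split key is derived.
 */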
3353 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3354 struct ahash_request *req)
3356 struct caam_hash_state *state = ahash_request_ctx(req);
3358 if (edesc->src_nents)
3359 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3361 if (edesc->qm_sg_bytes)
3362 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3363 DMA_TO_DEVICE);
3365 if (state->buf_dma) {
3366 dma_unmap_single(dev, state->buf_dma, state->buflen,
3367 DMA_TO_DEVICE);
3368 state->buf_dma = 0;
3372 static inline void ahash_unmap_ctx(struct device *dev,
3373 struct ahash_edesc *edesc,
3374 struct ahash_request *req, u32 flag)
3376 struct caam_hash_state *state = ahash_request_ctx(req);
3378 if (state->ctx_dma) {
3379 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3380 state->ctx_dma = 0;
3382 ahash_unmap(dev, edesc, req);
3385 static void ahash_done(void *cbk_ctx, u32 status)
3387 struct crypto_async_request *areq = cbk_ctx;
3388 struct ahash_request *req = ahash_request_cast(areq);
3389 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3390 struct caam_hash_state *state = ahash_request_ctx(req);
3391 struct ahash_edesc *edesc = state->caam_req.edesc;
3392 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3393 int digestsize = crypto_ahash_digestsize(ahash);
3394 int ecode = 0;
3396 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3398 if (unlikely(status))
3399 ecode = caam_qi2_strstatus(ctx->dev, status);
3401 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3402 memcpy(req->result, state->caam_ctx, digestsize);
3403 qi_cache_free(edesc);
3405 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3406 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3407 ctx->ctx_len, 1);
3409 req->base.complete(&req->base, ecode);
3412 static void ahash_done_bi(void *cbk_ctx, u32 status)
3414 struct crypto_async_request *areq = cbk_ctx;
3415 struct ahash_request *req = ahash_request_cast(areq);
3416 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3417 struct caam_hash_state *state = ahash_request_ctx(req);
3418 struct ahash_edesc *edesc = state->caam_req.edesc;
3419 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3420 int ecode = 0;
3422 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3424 if (unlikely(status))
3425 ecode = caam_qi2_strstatus(ctx->dev, status);
3427 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3428 qi_cache_free(edesc);
3430 scatterwalk_map_and_copy(state->buf, req->src,
3431 req->nbytes - state->next_buflen,
3432 state->next_buflen, 0);
3433 state->buflen = state->next_buflen;
3435 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3436 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3437 state->buflen, 1);
3439 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3440 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3441 ctx->ctx_len, 1);
3442 if (req->result)
3443 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3444 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3445 crypto_ahash_digestsize(ahash), 1);
3447 req->base.complete(&req->base, ecode);
3450 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3452 struct crypto_async_request *areq = cbk_ctx;
3453 struct ahash_request *req = ahash_request_cast(areq);
3454 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3455 struct caam_hash_state *state = ahash_request_ctx(req);
3456 struct ahash_edesc *edesc = state->caam_req.edesc;
3457 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3458 int digestsize = crypto_ahash_digestsize(ahash);
3459 int ecode = 0;
3461 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3463 if (unlikely(status))
3464 ecode = caam_qi2_strstatus(ctx->dev, status);
3466 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3467 memcpy(req->result, state->caam_ctx, digestsize);
3468 qi_cache_free(edesc);
3470 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3471 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3472 ctx->ctx_len, 1);
3474 req->base.complete(&req->base, ecode);
3477 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3479 struct crypto_async_request *areq = cbk_ctx;
3480 struct ahash_request *req = ahash_request_cast(areq);
3481 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3482 struct caam_hash_state *state = ahash_request_ctx(req);
3483 struct ahash_edesc *edesc = state->caam_req.edesc;
3484 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3485 int ecode = 0;
3487 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3489 if (unlikely(status))
3490 ecode = caam_qi2_strstatus(ctx->dev, status);
3492 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3493 qi_cache_free(edesc);
3495 scatterwalk_map_and_copy(state->buf, req->src,
3496 req->nbytes - state->next_buflen,
3497 state->next_buflen, 0);
3498 state->buflen = state->next_buflen;
3500 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3501 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3502 state->buflen, 1);
3504 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3505 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3506 ctx->ctx_len, 1);
3507 if (req->result)
3508 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3509 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3510 crypto_ahash_digestsize(ahash), 1);
3512 req->base.complete(&req->base, ecode);
3513 }
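/*
 * The four completion callbacks above differ only in the DMA direction
 * they unmap with and in what they propagate: ahash_done() and
 * ahash_done_ctx_src() copy the digest to req->result, while
 * ahash_done_bi() and ahash_done_ctx_dst() carry the unhashed tail of
 * the request into state->buf for the next update.
 */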
3515 static int ahash_update_ctx(struct ahash_request *req)
3517 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3518 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3519 struct caam_hash_state *state = ahash_request_ctx(req);
3520 struct caam_request *req_ctx = &state->caam_req;
3521 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3522 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3523 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3524 GFP_KERNEL : GFP_ATOMIC;
3525 u8 *buf = state->buf;
3526 int *buflen = &state->buflen;
3527 int *next_buflen = &state->next_buflen;
3528 int in_len = *buflen + req->nbytes, to_hash;
3529 int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3530 struct ahash_edesc *edesc;
3531 int ret = 0;
3533 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3534 to_hash = in_len - *next_buflen;
3536 if (to_hash) {
3537 struct dpaa2_sg_entry *sg_table;
3538 int src_len = req->nbytes - *next_buflen;
3540 src_nents = sg_nents_for_len(req->src, src_len);
3541 if (src_nents < 0) {
3542 dev_err(ctx->dev, "Invalid number of src SG.\n");
3543 return src_nents;
3546 if (src_nents) {
3547 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3548 DMA_TO_DEVICE);
3549 if (!mapped_nents) {
3550 dev_err(ctx->dev, "unable to DMA map source\n");
3551 return -ENOMEM;
3553 } else {
3554 mapped_nents = 0;
3557 /* allocate space for base edesc and link tables */
3558 edesc = qi_cache_zalloc(GFP_DMA | flags);
3559 if (!edesc) {
3560 dma_unmap_sg(ctx->dev, req->src, src_nents,
3561 DMA_TO_DEVICE);
3562 return -ENOMEM;
3565 edesc->src_nents = src_nents;
3566 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3567 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3568 sizeof(*sg_table);
3569 sg_table = &edesc->sgt[0];
3571 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3572 DMA_BIDIRECTIONAL);
3573 if (ret)
3574 goto unmap_ctx;
3576 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3577 if (ret)
3578 goto unmap_ctx;
3580 if (mapped_nents) {
3581 sg_to_qm_sg_last(req->src, src_len,
3582 sg_table + qm_sg_src_index, 0);
3583 } else {
3584 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3585 true);
3588 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3589 qm_sg_bytes, DMA_TO_DEVICE);
3590 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3591 dev_err(ctx->dev, "unable to map S/G table\n");
3592 ret = -ENOMEM;
3593 goto unmap_ctx;
3595 edesc->qm_sg_bytes = qm_sg_bytes;
3597 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3598 dpaa2_fl_set_final(in_fle, true);
3599 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3600 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3601 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3602 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3603 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3604 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3606 req_ctx->flc = &ctx->flc[UPDATE];
3607 req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3608 req_ctx->cbk = ahash_done_bi;
3609 req_ctx->ctx = &req->base;
3610 req_ctx->edesc = edesc;
3612 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3613 if (ret != -EINPROGRESS &&
3614 !(ret == -EBUSY &&
3615 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3616 goto unmap_ctx;
3617 } else if (*next_buflen) {
3618 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3619 req->nbytes, 0);
3620 *buflen = *next_buflen;
3622 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3623 DUMP_PREFIX_ADDRESS, 16, 4, buf,
3624 *buflen, 1);
3627 return ret;
3628 unmap_ctx:
3629 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3630 qi_cache_free(edesc);
3631 return ret;
3632 }
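/*
 * Buffering example for the arithmetic above: with a 64-byte block
 * size, 10 bytes already buffered and a 100-byte update, in_len = 110,
 * next_buflen = 110 & 63 = 46, so to_hash = 64 bytes are sent to the
 * engine and the trailing 46 bytes stay behind in state->buf for the
 * next call.
 */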
3634 static int ahash_final_ctx(struct ahash_request *req)
3636 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3637 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3638 struct caam_hash_state *state = ahash_request_ctx(req);
3639 struct caam_request *req_ctx = &state->caam_req;
3640 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3641 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3642 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3643 GFP_KERNEL : GFP_ATOMIC;
3644 int buflen = state->buflen;
3645 int qm_sg_bytes;
3646 int digestsize = crypto_ahash_digestsize(ahash);
3647 struct ahash_edesc *edesc;
3648 struct dpaa2_sg_entry *sg_table;
3649 int ret;
3651 /* allocate space for base edesc and link tables */
3652 edesc = qi_cache_zalloc(GFP_DMA | flags);
3653 if (!edesc)
3654 return -ENOMEM;
3656 qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3657 sg_table = &edesc->sgt[0];
3659 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3660 DMA_BIDIRECTIONAL);
3661 if (ret)
3662 goto unmap_ctx;
3664 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3665 if (ret)
3666 goto unmap_ctx;
3668 dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3670 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3671 DMA_TO_DEVICE);
3672 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3673 dev_err(ctx->dev, "unable to map S/G table\n");
3674 ret = -ENOMEM;
3675 goto unmap_ctx;
3677 edesc->qm_sg_bytes = qm_sg_bytes;
3679 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3680 dpaa2_fl_set_final(in_fle, true);
3681 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3682 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3683 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3684 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3685 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3686 dpaa2_fl_set_len(out_fle, digestsize);
3688 req_ctx->flc = &ctx->flc[FINALIZE];
3689 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3690 req_ctx->cbk = ahash_done_ctx_src;
3691 req_ctx->ctx = &req->base;
3692 req_ctx->edesc = edesc;
3694 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3695 if (ret == -EINPROGRESS ||
3696 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3697 return ret;
3699 unmap_ctx:
3700 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3701 qi_cache_free(edesc);
3702 return ret;
3705 static int ahash_finup_ctx(struct ahash_request *req)
3707 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3708 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3709 struct caam_hash_state *state = ahash_request_ctx(req);
3710 struct caam_request *req_ctx = &state->caam_req;
3711 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3712 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3713 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3714 GFP_KERNEL : GFP_ATOMIC;
3715 int buflen = state->buflen;
3716 int qm_sg_bytes, qm_sg_src_index;
3717 int src_nents, mapped_nents;
3718 int digestsize = crypto_ahash_digestsize(ahash);
3719 struct ahash_edesc *edesc;
3720 struct dpaa2_sg_entry *sg_table;
3721 int ret;
3723 src_nents = sg_nents_for_len(req->src, req->nbytes);
3724 if (src_nents < 0) {
3725 dev_err(ctx->dev, "Invalid number of src SG.\n");
3726 return src_nents;
3729 if (src_nents) {
3730 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3731 DMA_TO_DEVICE);
3732 if (!mapped_nents) {
3733 dev_err(ctx->dev, "unable to DMA map source\n");
3734 return -ENOMEM;
3736 } else {
3737 mapped_nents = 0;
3740 /* allocate space for base edesc and link tables */
3741 edesc = qi_cache_zalloc(GFP_DMA | flags);
3742 if (!edesc) {
3743 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3744 return -ENOMEM;
3747 edesc->src_nents = src_nents;
3748 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3749 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3750 sizeof(*sg_table);
3751 sg_table = &edesc->sgt[0];
3753 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3754 DMA_BIDIRECTIONAL);
3755 if (ret)
3756 goto unmap_ctx;
3758 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3759 if (ret)
3760 goto unmap_ctx;
3762 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3764 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3765 DMA_TO_DEVICE);
3766 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3767 dev_err(ctx->dev, "unable to map S/G table\n");
3768 ret = -ENOMEM;
3769 goto unmap_ctx;
3771 edesc->qm_sg_bytes = qm_sg_bytes;
3773 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3774 dpaa2_fl_set_final(in_fle, true);
3775 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3776 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3777 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3778 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3779 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3780 dpaa2_fl_set_len(out_fle, digestsize);
3782 req_ctx->flc = &ctx->flc[FINALIZE];
3783 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3784 req_ctx->cbk = ahash_done_ctx_src;
3785 req_ctx->ctx = &req->base;
3786 req_ctx->edesc = edesc;
3788 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3789 if (ret == -EINPROGRESS ||
3790 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3791 return ret;
3793 unmap_ctx:
3794 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3795 qi_cache_free(edesc);
3796 return ret;
3799 static int ahash_digest(struct ahash_request *req)
3801 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3802 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3803 struct caam_hash_state *state = ahash_request_ctx(req);
3804 struct caam_request *req_ctx = &state->caam_req;
3805 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3806 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3807 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3808 GFP_KERNEL : GFP_ATOMIC;
3809 int digestsize = crypto_ahash_digestsize(ahash);
3810 int src_nents, mapped_nents;
3811 struct ahash_edesc *edesc;
3812 int ret = -ENOMEM;
3814 state->buf_dma = 0;
3816 src_nents = sg_nents_for_len(req->src, req->nbytes);
3817 if (src_nents < 0) {
3818 dev_err(ctx->dev, "Invalid number of src SG.\n");
3819 return src_nents;
3822 if (src_nents) {
3823 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3824 DMA_TO_DEVICE);
3825 if (!mapped_nents) {
3826 dev_err(ctx->dev, "unable to map source for DMA\n");
3827 return ret;
3829 } else {
3830 mapped_nents = 0;
3833 /* allocate space for base edesc and link tables */
3834 edesc = qi_cache_zalloc(GFP_DMA | flags);
3835 if (!edesc) {
3836 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3837 return ret;
3840 edesc->src_nents = src_nents;
3841 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3843 if (mapped_nents > 1) {
3844 int qm_sg_bytes;
3845 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3847 qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3848 sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3849 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3850 qm_sg_bytes, DMA_TO_DEVICE);
3851 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3852 dev_err(ctx->dev, "unable to map S/G table\n");
3853 goto unmap;
3855 edesc->qm_sg_bytes = qm_sg_bytes;
3856 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3857 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3858 } else {
3859 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3860 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3863 state->ctx_dma_len = digestsize;
3864 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3865 DMA_FROM_DEVICE);
3866 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3867 dev_err(ctx->dev, "unable to map ctx\n");
3868 state->ctx_dma = 0;
3869 goto unmap;
3872 dpaa2_fl_set_final(in_fle, true);
3873 dpaa2_fl_set_len(in_fle, req->nbytes);
3874 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3875 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3876 dpaa2_fl_set_len(out_fle, digestsize);
3878 req_ctx->flc = &ctx->flc[DIGEST];
3879 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3880 req_ctx->cbk = ahash_done;
3881 req_ctx->ctx = &req->base;
3882 req_ctx->edesc = edesc;
3883 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3884 if (ret == -EINPROGRESS ||
3885 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3886 return ret;
3888 unmap:
3889 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3890 qi_cache_free(edesc);
3891 return ret;
3892 }
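/*
 * Note: ahash_digest() uses a single frame list entry when the source
 * maps to one DMA segment and builds the S/G format only for
 * multi-entry scatterlists, so the common contiguous case skips the
 * S/G table mapping entirely.
 */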
3894 static int ahash_final_no_ctx(struct ahash_request *req)
3896 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3897 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3898 struct caam_hash_state *state = ahash_request_ctx(req);
3899 struct caam_request *req_ctx = &state->caam_req;
3900 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3901 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3902 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3903 GFP_KERNEL : GFP_ATOMIC;
3904 u8 *buf = state->buf;
3905 int buflen = state->buflen;
3906 int digestsize = crypto_ahash_digestsize(ahash);
3907 struct ahash_edesc *edesc;
3908 int ret = -ENOMEM;
3910 /* allocate space for base edesc and link tables */
3911 edesc = qi_cache_zalloc(GFP_DMA | flags);
3912 if (!edesc)
3913 return ret;
3915 if (buflen) {
3916 state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3917 DMA_TO_DEVICE);
3918 if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3919 dev_err(ctx->dev, "unable to map src\n");
3920 goto unmap;
3924 state->ctx_dma_len = digestsize;
3925 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3926 DMA_FROM_DEVICE);
3927 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3928 dev_err(ctx->dev, "unable to map ctx\n");
3929 state->ctx_dma = 0;
3930 goto unmap;
3933 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3934 dpaa2_fl_set_final(in_fle, true);
3935 /*
3936 * The crypto engine requires the input entry to be present when a
3937 * "frame list" FD is used.
3938 * Since the engine does not support FMT=2'b11 (unused entry type),
3939 * leaving in_fle zeroized (except for the "Final" flag) is the best option.
3940 */
3941 if (buflen) {
3942 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3943 dpaa2_fl_set_addr(in_fle, state->buf_dma);
3944 dpaa2_fl_set_len(in_fle, buflen);
3945 }
3946 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3947 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3948 dpaa2_fl_set_len(out_fle, digestsize);
3950 req_ctx->flc = &ctx->flc[DIGEST];
3951 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3952 req_ctx->cbk = ahash_done;
3953 req_ctx->ctx = &req->base;
3954 req_ctx->edesc = edesc;
3956 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3957 if (ret == -EINPROGRESS ||
3958 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3959 return ret;
3961 unmap:
3962 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3963 qi_cache_free(edesc);
3964 return ret;
3965 }
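/*
* Editor's note: ahash_update_no_ctx() below buffers the sub-blocksize tail
* of each update and only sends whole blocks to the engine. A worked example
* with hypothetical values, assuming SHA-256 and its 64-byte block:
*
* in_len = *buflen (12) + req->nbytes (100) = 112
* *next_buflen = 112 & (64 - 1) = 48 bytes kept back for the next call
* to_hash = 112 - 48 = 64 bytes submitted to CAAM now
*
* The mask works because block sizes are powers of two, so
* in_len & (blocksize - 1) equals in_len % blocksize.
*/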
3967 static int ahash_update_no_ctx(struct ahash_request *req)
3968 {
3969 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3970 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3971 struct caam_hash_state *state = ahash_request_ctx(req);
3972 struct caam_request *req_ctx = &state->caam_req;
3973 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3974 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3975 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3976 GFP_KERNEL : GFP_ATOMIC;
3977 u8 *buf = state->buf;
3978 int *buflen = &state->buflen;
3979 int *next_buflen = &state->next_buflen;
3980 int in_len = *buflen + req->nbytes, to_hash;
3981 int qm_sg_bytes, src_nents, mapped_nents;
3982 struct ahash_edesc *edesc;
3983 int ret = 0;
3985 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3986 to_hash = in_len - *next_buflen;
3988 if (to_hash) {
3989 struct dpaa2_sg_entry *sg_table;
3990 int src_len = req->nbytes - *next_buflen;
3992 src_nents = sg_nents_for_len(req->src, src_len);
3993 if (src_nents < 0) {
3994 dev_err(ctx->dev, "Invalid number of src SG.\n");
3995 return src_nents;
3996 }
3998 if (src_nents) {
3999 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4000 DMA_TO_DEVICE);
4001 if (!mapped_nents) {
4002 dev_err(ctx->dev, "unable to DMA map source\n");
4003 return -ENOMEM;
4004 }
4005 } else {
4006 mapped_nents = 0;
4007 }
4009 /* allocate space for base edesc and link tables */
4010 edesc = qi_cache_zalloc(GFP_DMA | flags);
4011 if (!edesc) {
4012 dma_unmap_sg(ctx->dev, req->src, src_nents,
4013 DMA_TO_DEVICE);
4014 return -ENOMEM;
4015 }
4017 edesc->src_nents = src_nents;
4018 qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
4019 sizeof(*sg_table);
4020 sg_table = &edesc->sgt[0];
4022 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4023 if (ret)
4024 goto unmap_ctx;
4026 sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
4028 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4029 qm_sg_bytes, DMA_TO_DEVICE);
4030 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4031 dev_err(ctx->dev, "unable to map S/G table\n");
4032 ret = -ENOMEM;
4033 goto unmap_ctx;
4034 }
4035 edesc->qm_sg_bytes = qm_sg_bytes;
4037 state->ctx_dma_len = ctx->ctx_len;
4038 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4039 ctx->ctx_len, DMA_FROM_DEVICE);
4040 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4041 dev_err(ctx->dev, "unable to map ctx\n");
4042 state->ctx_dma = 0;
4043 ret = -ENOMEM;
4044 goto unmap_ctx;
4045 }
4047 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4048 dpaa2_fl_set_final(in_fle, true);
4049 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4050 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4051 dpaa2_fl_set_len(in_fle, to_hash);
4052 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4053 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4054 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4056 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4057 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4058 req_ctx->cbk = ahash_done_ctx_dst;
4059 req_ctx->ctx = &req->base;
4060 req_ctx->edesc = edesc;
4062 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4063 if (ret != -EINPROGRESS &&
4064 !(ret == -EBUSY &&
4065 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4066 goto unmap_ctx;
4068 state->update = ahash_update_ctx;
4069 state->finup = ahash_finup_ctx;
4070 state->final = ahash_final_ctx;
4071 } else if (*next_buflen) {
4072 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
4073 req->nbytes, 0);
4074 *buflen = *next_buflen;
4076 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4077 DUMP_PREFIX_ADDRESS, 16, 4, buf,
4078 *buflen, 1);
4079 }
4081 return ret;
4082 unmap_ctx:
4083 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4084 qi_cache_free(edesc);
4085 return ret;
4086 }
4088 static int ahash_finup_no_ctx(struct ahash_request *req)
4089 {
4090 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4091 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4092 struct caam_hash_state *state = ahash_request_ctx(req);
4093 struct caam_request *req_ctx = &state->caam_req;
4094 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4095 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4096 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4097 GFP_KERNEL : GFP_ATOMIC;
4098 int buflen = state->buflen;
4099 int qm_sg_bytes, src_nents, mapped_nents;
4100 int digestsize = crypto_ahash_digestsize(ahash);
4101 struct ahash_edesc *edesc;
4102 struct dpaa2_sg_entry *sg_table;
4103 int ret = -ENOMEM;
4105 src_nents = sg_nents_for_len(req->src, req->nbytes);
4106 if (src_nents < 0) {
4107 dev_err(ctx->dev, "Invalid number of src SG.\n");
4108 return src_nents;
4109 }
4111 if (src_nents) {
4112 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4113 DMA_TO_DEVICE);
4114 if (!mapped_nents) {
4115 dev_err(ctx->dev, "unable to DMA map source\n");
4116 return ret;
4117 }
4118 } else {
4119 mapped_nents = 0;
4120 }
4122 /* allocate space for base edesc and link tables */
4123 edesc = qi_cache_zalloc(GFP_DMA | flags);
4124 if (!edesc) {
4125 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4126 return ret;
4127 }
4129 edesc->src_nents = src_nents;
4130 qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4131 sg_table = &edesc->sgt[0];
4133 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4134 if (ret)
4135 goto unmap;
4137 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4139 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4140 DMA_TO_DEVICE);
4141 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4142 dev_err(ctx->dev, "unable to map S/G table\n");
4143 ret = -ENOMEM;
4144 goto unmap;
4145 }
4146 edesc->qm_sg_bytes = qm_sg_bytes;
4148 state->ctx_dma_len = digestsize;
4149 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4150 DMA_FROM_DEVICE);
4151 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4152 dev_err(ctx->dev, "unable to map ctx\n");
4153 state->ctx_dma = 0;
4154 ret = -ENOMEM;
4155 goto unmap;
4156 }
4158 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4159 dpaa2_fl_set_final(in_fle, true);
4160 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4161 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4162 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4163 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4164 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4165 dpaa2_fl_set_len(out_fle, digestsize);
4167 req_ctx->flc = &ctx->flc[DIGEST];
4168 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4169 req_ctx->cbk = ahash_done;
4170 req_ctx->ctx = &req->base;
4171 req_ctx->edesc = edesc;
4172 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4173 if (ret != -EINPROGRESS &&
4174 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4175 goto unmap;
4177 return ret;
4178 unmap:
4179 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4180 qi_cache_free(edesc);
4181 return ret;
4182 }
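/*
* Editor's note: in ahash_finup_no_ctx() above, the qm_sg table holds at
* least one entry for the buffered tail plus one per mapped source segment
* (the code sizes 2 + mapped_nents, leaving slack), and pad_sg_nents()
* rounds the total up to the S/G granularity the CAAM DMA engine requires
* before the table is mapped.
*/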
4184 static int ahash_update_first(struct ahash_request *req)
4185 {
4186 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4187 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4188 struct caam_hash_state *state = ahash_request_ctx(req);
4189 struct caam_request *req_ctx = &state->caam_req;
4190 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4191 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4192 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4193 GFP_KERNEL : GFP_ATOMIC;
4194 u8 *buf = state->buf;
4195 int *buflen = &state->buflen;
4196 int *next_buflen = &state->next_buflen;
4197 int to_hash;
4198 int src_nents, mapped_nents;
4199 struct ahash_edesc *edesc;
4200 int ret = 0;
4202 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4203 1);
4204 to_hash = req->nbytes - *next_buflen;
4206 if (to_hash) {
4207 struct dpaa2_sg_entry *sg_table;
4208 int src_len = req->nbytes - *next_buflen;
4210 src_nents = sg_nents_for_len(req->src, src_len);
4211 if (src_nents < 0) {
4212 dev_err(ctx->dev, "Invalid number of src SG.\n");
4213 return src_nents;
4214 }
4216 if (src_nents) {
4217 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4218 DMA_TO_DEVICE);
4219 if (!mapped_nents) {
4220 dev_err(ctx->dev, "unable to map source for DMA\n");
4221 return -ENOMEM;
4222 }
4223 } else {
4224 mapped_nents = 0;
4225 }
4227 /* allocate space for base edesc and link tables */
4228 edesc = qi_cache_zalloc(GFP_DMA | flags);
4229 if (!edesc) {
4230 dma_unmap_sg(ctx->dev, req->src, src_nents,
4231 DMA_TO_DEVICE);
4232 return -ENOMEM;
4233 }
4235 edesc->src_nents = src_nents;
4236 sg_table = &edesc->sgt[0];
4238 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4239 dpaa2_fl_set_final(in_fle, true);
4240 dpaa2_fl_set_len(in_fle, to_hash);
4242 if (mapped_nents > 1) {
4243 int qm_sg_bytes;
4245 sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4246 qm_sg_bytes = pad_sg_nents(mapped_nents) *
4247 sizeof(*sg_table);
4248 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4249 qm_sg_bytes,
4250 DMA_TO_DEVICE);
4251 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4252 dev_err(ctx->dev, "unable to map S/G table\n");
4253 ret = -ENOMEM;
4254 goto unmap_ctx;
4255 }
4256 edesc->qm_sg_bytes = qm_sg_bytes;
4257 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4258 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4259 } else {
4260 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4261 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4262 }
4264 state->ctx_dma_len = ctx->ctx_len;
4265 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4266 ctx->ctx_len, DMA_FROM_DEVICE);
4267 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4268 dev_err(ctx->dev, "unable to map ctx\n");
4269 state->ctx_dma = 0;
4270 ret = -ENOMEM;
4271 goto unmap_ctx;
4272 }
4274 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4275 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4276 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4278 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4279 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4280 req_ctx->cbk = ahash_done_ctx_dst;
4281 req_ctx->ctx = &req->base;
4282 req_ctx->edesc = edesc;
4284 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4285 if (ret != -EINPROGRESS &&
4286 !(ret == -EBUSY && req->base.flags &
4287 CRYPTO_TFM_REQ_MAY_BACKLOG))
4288 goto unmap_ctx;
4290 state->update = ahash_update_ctx;
4291 state->finup = ahash_finup_ctx;
4292 state->final = ahash_final_ctx;
4293 } else if (*next_buflen) {
4294 state->update = ahash_update_no_ctx;
4295 state->finup = ahash_finup_no_ctx;
4296 state->final = ahash_final_no_ctx;
4297 scatterwalk_map_and_copy(buf, req->src, 0,
4298 req->nbytes, 0);
4299 *buflen = *next_buflen;
4301 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4302 DUMP_PREFIX_ADDRESS, 16, 4, buf,
4303 *buflen, 1);
4304 }
4306 return ret;
4307 unmap_ctx:
4308 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4309 qi_cache_free(edesc);
4310 return ret;
4311 }
4313 static int ahash_finup_first(struct ahash_request *req)
4314 {
4315 return ahash_digest(req);
4316 }
4318 static int ahash_init(struct ahash_request *req)
4319 {
4320 struct caam_hash_state *state = ahash_request_ctx(req);
4322 state->update = ahash_update_first;
4323 state->finup = ahash_finup_first;
4324 state->final = ahash_final_no_ctx;
4326 state->ctx_dma = 0;
4327 state->ctx_dma_len = 0;
4328 state->buf_dma = 0;
4329 state->buflen = 0;
4330 state->next_buflen = 0;
4332 return 0;
4333 }
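/*
* Editor's note: ahash_init() seeds a small state machine - update/finup/
* final start out as the "first"/"no_ctx" variants and are switched to the
* "_ctx" variants once a running context lives in CAAM. A minimal sketch of
* how the generic crypto API drives these hooks (hypothetical caller, not
* part of this driver; error handling omitted):
*
* struct crypto_ahash *tfm = crypto_alloc_ahash("sha256-caam-qi2", 0, 0);
* struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
* ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, cb, cb_ctx);
* ahash_request_set_crypt(req, sgl, digest, nbytes);
* crypto_ahash_init(req); // -> ahash_init()
* crypto_ahash_update(req); // -> state->update()
* crypto_ahash_final(req); // -> state->final()
*/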
4335 static int ahash_update(struct ahash_request *req)
4336 {
4337 struct caam_hash_state *state = ahash_request_ctx(req);
4339 return state->update(req);
4340 }
4342 static int ahash_finup(struct ahash_request *req)
4343 {
4344 struct caam_hash_state *state = ahash_request_ctx(req);
4346 return state->finup(req);
4347 }
4349 static int ahash_final(struct ahash_request *req)
4350 {
4351 struct caam_hash_state *state = ahash_request_ctx(req);
4353 return state->final(req);
4354 }
4356 static int ahash_export(struct ahash_request *req, void *out)
4357 {
4358 struct caam_hash_state *state = ahash_request_ctx(req);
4359 struct caam_export_state *export = out;
4360 u8 *buf = state->buf;
4361 int len = state->buflen;
4363 memcpy(export->buf, buf, len);
4364 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4365 export->buflen = len;
4366 export->update = state->update;
4367 export->final = state->final;
4368 export->finup = state->finup;
4370 return 0;
4371 }
4373 static int ahash_import(struct ahash_request *req, const void *in)
4374 {
4375 struct caam_hash_state *state = ahash_request_ctx(req);
4376 const struct caam_export_state *export = in;
4378 memset(state, 0, sizeof(*state));
4379 memcpy(state->buf, export->buf, export->buflen);
4380 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4381 state->buflen = export->buflen;
4382 state->update = export->update;
4383 state->final = export->final;
4384 state->finup = export->finup;
4386 return 0;
4387 }
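/*
* Editor's note: export()/import() serialize only the software-visible
* state - the pending buffer and the running CAAM context - so a partially
* hashed request can be frozen and resumed later, possibly on another tfm.
* This is why every algorithm below advertises
* statesize = sizeof(struct caam_export_state).
*/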
4389 struct caam_hash_template {
4390 char name[CRYPTO_MAX_ALG_NAME];
4391 char driver_name[CRYPTO_MAX_ALG_NAME];
4392 char hmac_name[CRYPTO_MAX_ALG_NAME];
4393 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4394 unsigned int blocksize;
4395 struct ahash_alg template_ahash;
4396 u32 alg_type;
4397 };
4399 /* ahash descriptors */
4400 static struct caam_hash_template driver_hash[] = {
4401 {
4402 .name = "sha1",
4403 .driver_name = "sha1-caam-qi2",
4404 .hmac_name = "hmac(sha1)",
4405 .hmac_driver_name = "hmac-sha1-caam-qi2",
4406 .blocksize = SHA1_BLOCK_SIZE,
4407 .template_ahash = {
4408 .init = ahash_init,
4409 .update = ahash_update,
4410 .final = ahash_final,
4411 .finup = ahash_finup,
4412 .digest = ahash_digest,
4413 .export = ahash_export,
4414 .import = ahash_import,
4415 .setkey = ahash_setkey,
4416 .halg = {
4417 .digestsize = SHA1_DIGEST_SIZE,
4418 .statesize = sizeof(struct caam_export_state),
4419 },
4420 },
4421 .alg_type = OP_ALG_ALGSEL_SHA1,
4422 }, {
4423 .name = "sha224",
4424 .driver_name = "sha224-caam-qi2",
4425 .hmac_name = "hmac(sha224)",
4426 .hmac_driver_name = "hmac-sha224-caam-qi2",
4427 .blocksize = SHA224_BLOCK_SIZE,
4428 .template_ahash = {
4429 .init = ahash_init,
4430 .update = ahash_update,
4431 .final = ahash_final,
4432 .finup = ahash_finup,
4433 .digest = ahash_digest,
4434 .export = ahash_export,
4435 .import = ahash_import,
4436 .setkey = ahash_setkey,
4437 .halg = {
4438 .digestsize = SHA224_DIGEST_SIZE,
4439 .statesize = sizeof(struct caam_export_state),
4440 },
4441 },
4442 .alg_type = OP_ALG_ALGSEL_SHA224,
4443 }, {
4444 .name = "sha256",
4445 .driver_name = "sha256-caam-qi2",
4446 .hmac_name = "hmac(sha256)",
4447 .hmac_driver_name = "hmac-sha256-caam-qi2",
4448 .blocksize = SHA256_BLOCK_SIZE,
4449 .template_ahash = {
4450 .init = ahash_init,
4451 .update = ahash_update,
4452 .final = ahash_final,
4453 .finup = ahash_finup,
4454 .digest = ahash_digest,
4455 .export = ahash_export,
4456 .import = ahash_import,
4457 .setkey = ahash_setkey,
4458 .halg = {
4459 .digestsize = SHA256_DIGEST_SIZE,
4460 .statesize = sizeof(struct caam_export_state),
4461 },
4462 },
4463 .alg_type = OP_ALG_ALGSEL_SHA256,
4464 }, {
4465 .name = "sha384",
4466 .driver_name = "sha384-caam-qi2",
4467 .hmac_name = "hmac(sha384)",
4468 .hmac_driver_name = "hmac-sha384-caam-qi2",
4469 .blocksize = SHA384_BLOCK_SIZE,
4470 .template_ahash = {
4471 .init = ahash_init,
4472 .update = ahash_update,
4473 .final = ahash_final,
4474 .finup = ahash_finup,
4475 .digest = ahash_digest,
4476 .export = ahash_export,
4477 .import = ahash_import,
4478 .setkey = ahash_setkey,
4479 .halg = {
4480 .digestsize = SHA384_DIGEST_SIZE,
4481 .statesize = sizeof(struct caam_export_state),
4482 },
4483 },
4484 .alg_type = OP_ALG_ALGSEL_SHA384,
4485 }, {
4486 .name = "sha512",
4487 .driver_name = "sha512-caam-qi2",
4488 .hmac_name = "hmac(sha512)",
4489 .hmac_driver_name = "hmac-sha512-caam-qi2",
4490 .blocksize = SHA512_BLOCK_SIZE,
4491 .template_ahash = {
4492 .init = ahash_init,
4493 .update = ahash_update,
4494 .final = ahash_final,
4495 .finup = ahash_finup,
4496 .digest = ahash_digest,
4497 .export = ahash_export,
4498 .import = ahash_import,
4499 .setkey = ahash_setkey,
4500 .halg = {
4501 .digestsize = SHA512_DIGEST_SIZE,
4502 .statesize = sizeof(struct caam_export_state),
4503 },
4504 },
4505 .alg_type = OP_ALG_ALGSEL_SHA512,
4506 }, {
4507 .name = "md5",
4508 .driver_name = "md5-caam-qi2",
4509 .hmac_name = "hmac(md5)",
4510 .hmac_driver_name = "hmac-md5-caam-qi2",
4511 .blocksize = MD5_BLOCK_WORDS * 4,
4512 .template_ahash = {
4513 .init = ahash_init,
4514 .update = ahash_update,
4515 .final = ahash_final,
4516 .finup = ahash_finup,
4517 .digest = ahash_digest,
4518 .export = ahash_export,
4519 .import = ahash_import,
4520 .setkey = ahash_setkey,
4521 .halg = {
4522 .digestsize = MD5_DIGEST_SIZE,
4523 .statesize = sizeof(struct caam_export_state),
4524 },
4525 },
4526 .alg_type = OP_ALG_ALGSEL_MD5,
4527 },
4528 };
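/*
* Editor's note: each template above is registered twice by the probe code -
* once as the plain hash (e.g. "sha256") and once as the keyed HMAC variant
* ("hmac(sha256)") - via caam_hash_alloc(dev, template, keyed). For md5 the
* block size is written MD5_BLOCK_WORDS * 4 because the md5 header counts
* the 64-byte block in 32-bit words rather than in bytes.
*/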
4530 struct caam_hash_alg {
4531 struct list_head entry;
4532 struct device *dev;
4533 int alg_type;
4534 struct ahash_alg ahash_alg;
4535 };
4537 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4538 {
4539 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4540 struct crypto_alg *base = tfm->__crt_alg;
4541 struct hash_alg_common *halg =
4542 container_of(base, struct hash_alg_common, base);
4543 struct ahash_alg *alg =
4544 container_of(halg, struct ahash_alg, halg);
4545 struct caam_hash_alg *caam_hash =
4546 container_of(alg, struct caam_hash_alg, ahash_alg);
4547 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4548 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4549 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4550 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4551 HASH_MSG_LEN + 32,
4552 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4553 HASH_MSG_LEN + 64,
4554 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4555 dma_addr_t dma_addr;
4556 int i;
4558 ctx->dev = caam_hash->dev;
4560 if (alg->setkey) {
4561 ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4562 ARRAY_SIZE(ctx->key),
4563 DMA_TO_DEVICE,
4564 DMA_ATTR_SKIP_CPU_SYNC);
4565 if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4566 dev_err(ctx->dev, "unable to map key\n");
4567 return -ENOMEM;
4568 }
4569 }
4571 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4572 DMA_BIDIRECTIONAL,
4573 DMA_ATTR_SKIP_CPU_SYNC);
4574 if (dma_mapping_error(ctx->dev, dma_addr)) {
4575 dev_err(ctx->dev, "unable to map shared descriptors\n");
4576 if (ctx->adata.key_dma)
4577 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4578 ARRAY_SIZE(ctx->key),
4579 DMA_TO_DEVICE,
4580 DMA_ATTR_SKIP_CPU_SYNC);
4581 return -ENOMEM;
4582 }
4584 for (i = 0; i < HASH_NUM_OP; i++)
4585 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4587 /* copy descriptor header template value */
4588 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4590 ctx->ctx_len = runninglen[(ctx->adata.algtype &
4591 OP_ALG_ALGSEL_SUBMASK) >>
4592 OP_ALG_ALGSEL_SHIFT];
4594 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4595 sizeof(struct caam_hash_state));
4597 /*
4598 * For keyed hash algorithms shared descriptors
4599 * will be created later in setkey() callback
4600 */
4601 return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
4602 }
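/*
* Editor's note: runninglen[] above maps the MDHA algorithm selector to the
* size of the running state kept in CAAM: the message-length field
* (HASH_MSG_LEN) plus the internal digest. For SHA-256 that is 8 + 32 = 40
* bytes; the bare 32 and 64 literals cover SHA-224 and SHA-384, whose
* internal digests are as wide as SHA-256 and SHA-512 respectively.
*/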
4604 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4605 {
4606 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4608 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4609 DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4610 if (ctx->adata.key_dma)
4611 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4612 ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4613 DMA_ATTR_SKIP_CPU_SYNC);
4614 }
4616 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4617 struct caam_hash_template *template, bool keyed)
4618 {
4619 struct caam_hash_alg *t_alg;
4620 struct ahash_alg *halg;
4621 struct crypto_alg *alg;
4623 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4624 if (!t_alg)
4625 return ERR_PTR(-ENOMEM);
4627 t_alg->ahash_alg = template->template_ahash;
4628 halg = &t_alg->ahash_alg;
4629 alg = &halg->halg.base;
4631 if (keyed) {
4632 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4633 template->hmac_name);
4634 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4635 template->hmac_driver_name);
4636 } else {
4637 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4638 template->name);
4639 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4640 template->driver_name);
4641 t_alg->ahash_alg.setkey = NULL;
4642 }
4643 alg->cra_module = THIS_MODULE;
4644 alg->cra_init = caam_hash_cra_init;
4645 alg->cra_exit = caam_hash_cra_exit;
4646 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4647 alg->cra_priority = CAAM_CRA_PRIORITY;
4648 alg->cra_blocksize = template->blocksize;
4649 alg->cra_alignmask = 0;
4650 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4652 t_alg->alg_type = template->alg_type;
4653 t_alg->dev = dev;
4655 return t_alg;
4656 }
4658 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4659 {
4660 struct dpaa2_caam_priv_per_cpu *ppriv;
4662 ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4663 napi_schedule_irqoff(&ppriv->napi);
4664 }
4666 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4667 {
4668 struct device *dev = priv->dev;
4669 struct dpaa2_io_notification_ctx *nctx;
4670 struct dpaa2_caam_priv_per_cpu *ppriv;
4671 int err, i = 0, cpu;
4673 for_each_online_cpu(cpu) {
4674 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4675 ppriv->priv = priv;
4676 nctx = &ppriv->nctx;
4677 nctx->is_cdan = 0;
4678 nctx->id = ppriv->rsp_fqid;
4679 nctx->desired_cpu = cpu;
4680 nctx->cb = dpaa2_caam_fqdan_cb;
4682 /* Register notification callbacks */
4683 ppriv->dpio = dpaa2_io_service_select(cpu);
4684 err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4685 if (unlikely(err)) {
4686 dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4687 nctx->cb = NULL;
4688 /*
4689 * If no affine DPIO for this core, there's probably
4690 * none available for next cores either. Signal we want
4691 * to retry later, in case the DPIO devices weren't
4692 * probed yet.
4693 */
4694 err = -EPROBE_DEFER;
4695 goto err;
4696 }
4698 ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4699 dev);
4700 if (unlikely(!ppriv->store)) {
4701 dev_err(dev, "dpaa2_io_store_create() failed\n");
4702 err = -ENOMEM;
4703 goto err;
4704 }
4706 if (++i == priv->num_pairs)
4707 break;
4708 }
4710 return 0;
4712 err:
4713 for_each_online_cpu(cpu) {
4714 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4715 if (!ppriv->nctx.cb)
4716 break;
4717 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4718 }
4720 for_each_online_cpu(cpu) {
4721 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4722 if (!ppriv->store)
4723 break;
4724 dpaa2_io_store_destroy(ppriv->store);
4725 }
4727 return err;
4728 }
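/*
* Editor's note: the two unwind loops above only touch CPUs that were
* actually set up - the first stops at the first ppriv whose callback was
* never registered, the second at the first missing store. Returning
* -EPROBE_DEFER (rather than a hard error) asks the driver core to retry
* the probe once the fsl-mc DPIO objects have been bound.
*/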
4730 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4731 {
4732 struct dpaa2_caam_priv_per_cpu *ppriv;
4733 int i = 0, cpu;
4735 for_each_online_cpu(cpu) {
4736 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4737 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4738 priv->dev);
4739 dpaa2_io_store_destroy(ppriv->store);
4741 if (++i == priv->num_pairs)
4742 return;
4743 }
4744 }
4746 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4747 {
4748 struct dpseci_rx_queue_cfg rx_queue_cfg;
4749 struct device *dev = priv->dev;
4750 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4751 struct dpaa2_caam_priv_per_cpu *ppriv;
4752 int err = 0, i = 0, cpu;
4754 /* Configure Rx queues */
4755 for_each_online_cpu(cpu) {
4756 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4758 rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4759 DPSECI_QUEUE_OPT_USER_CTX;
4760 rx_queue_cfg.order_preservation_en = 0;
4761 rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4762 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4763 /*
4764 * Rx priority (WQ) doesn't really matter, since we use
4765 * pull mode, i.e. volatile dequeues from specific FQs
4766 */
4767 rx_queue_cfg.dest_cfg.priority = 0;
4768 rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4770 err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4771 &rx_queue_cfg);
4772 if (err) {
4773 dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4774 err);
4775 return err;
4776 }
4778 if (++i == priv->num_pairs)
4779 break;
4780 }
4782 return err;
4783 }
4785 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4786 {
4787 struct device *dev = priv->dev;
4789 if (!priv->cscn_mem)
4790 return;
4792 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4793 kfree(priv->cscn_mem);
4794 }
4796 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4797 {
4798 struct device *dev = priv->dev;
4799 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4800 int err;
4802 if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
4803 err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
4804 if (err)
4805 dev_err(dev, "dpseci_reset() failed\n");
4806 }
4808 dpaa2_dpseci_congestion_free(priv);
4809 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4810 }
4812 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4813 const struct dpaa2_fd *fd)
4814 {
4815 struct caam_request *req;
4816 u32 fd_err;
4818 if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4819 dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4820 return;
4821 }
4823 fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4824 if (unlikely(fd_err))
4825 dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4827 /*
4828 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4829 * in FD[ERR] or FD[FRC].
4830 */
4831 req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4832 dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4833 DMA_BIDIRECTIONAL);
4834 req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4835 }
4837 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4838 {
4839 int err;
4841 /* Retry while portal is busy */
4842 do {
4843 err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4844 ppriv->store);
4845 } while (err == -EBUSY);
4847 if (unlikely(err))
4848 dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
4850 return err;
4851 }
4853 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4854 {
4855 struct dpaa2_dq *dq;
4856 int cleaned = 0, is_last;
4858 do {
4859 dq = dpaa2_io_store_next(ppriv->store, &is_last);
4860 if (unlikely(!dq)) {
4861 if (unlikely(!is_last)) {
4862 dev_dbg(ppriv->priv->dev,
4863 "FQ %d returned no valid frames\n",
4864 ppriv->rsp_fqid);
4865 /*
4866 * MUST retry until we get some sort of
4867 * valid response token (be it "empty dequeue"
4868 * or a valid frame).
4869 */
4870 continue;
4871 }
4872 break;
4873 }
4875 /* Process FD */
4876 dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4877 cleaned++;
4878 } while (!is_last);
4880 return cleaned;
4881 }
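/*
* Editor's note: dpaa2_io_store_next() returns NULL in two distinct cases -
* while the pull response is still in flight (!is_last, so the loop above
* keeps polling) and after the last entry of the volatile dequeue has been
* consumed (is_last, so the loop exits). One FD is processed per valid
* dequeue entry and the count is returned to the NAPI poller.
*/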
4883 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4884 {
4885 struct dpaa2_caam_priv_per_cpu *ppriv;
4886 struct dpaa2_caam_priv *priv;
4887 int err, cleaned = 0, store_cleaned;
4889 ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4890 priv = ppriv->priv;
4892 if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4893 return 0;
4895 do {
4896 store_cleaned = dpaa2_caam_store_consume(ppriv);
4897 cleaned += store_cleaned;
4899 if (store_cleaned == 0 ||
4900 cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4901 break;
4903 /* Try to dequeue some more */
4904 err = dpaa2_caam_pull_fq(ppriv);
4905 if (unlikely(err))
4906 break;
4907 } while (1);
4909 if (cleaned < budget) {
4910 napi_complete_done(napi, cleaned);
4911 err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4912 if (unlikely(err))
4913 dev_err(priv->dev, "Notification rearm failed: %d\n",
4914 err);
4915 }
4917 return cleaned;
4918 }
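/*
* Editor's note: every pull can return up to a full store of frames, so the
* loop above stops re-pulling as soon as one more store might overshoot the
* NAPI budget. With illustrative numbers - budget = 64, store size 16 - no
* further pull is issued once cleaned > 64 - 16 = 48; if the budget was not
* exhausted, NAPI is completed and the FQDAN notification is rearmed.
*/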
4920 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4921 u16 token)
4922 {
4923 struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4924 struct device *dev = priv->dev;
4925 int err;
4927 /*
4928 * Congestion group feature supported starting with DPSECI API v5.1
4929 * and only when object has been created with this capability.
4930 */
4931 if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4932 !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4933 return 0;
4935 priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4936 GFP_KERNEL | GFP_DMA);
4937 if (!priv->cscn_mem)
4938 return -ENOMEM;
4940 priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4941 priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4942 DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4943 if (dma_mapping_error(dev, priv->cscn_dma)) {
4944 dev_err(dev, "Error mapping CSCN memory area\n");
4945 err = -ENOMEM;
4946 goto err_dma_map;
4947 }
4949 cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4950 cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4951 cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4952 cong_notif_cfg.message_ctx = (uintptr_t)priv;
4953 cong_notif_cfg.message_iova = priv->cscn_dma;
4954 cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4955 DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4956 DPSECI_CGN_MODE_COHERENT_WRITE;
4958 err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4959 &cong_notif_cfg);
4960 if (err) {
4961 dev_err(dev, "dpseci_set_congestion_notification failed\n");
4962 goto err_set_cong;
4963 }
4965 return 0;
4967 err_set_cong:
4968 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4969 err_dma_map:
4970 kfree(priv->cscn_mem);
4972 return err;
4973 }
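/*
* Editor's note: once this setup succeeds, MC firmware writes a congestion
* state change notification (CSCN) record into cscn_mem whenever the group
* crosses the entry/exit thresholds. dpaa2_caam_enqueue() at the bottom of
* this file syncs and inspects that record and fails fast with -EBUSY while
* the group is congested, rather than enqueuing into it.
*/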
4975 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4976 {
4977 struct device *dev = &ls_dev->dev;
4978 struct dpaa2_caam_priv *priv;
4979 struct dpaa2_caam_priv_per_cpu *ppriv;
4980 int err, cpu;
4981 u8 i;
4983 priv = dev_get_drvdata(dev);
4985 priv->dev = dev;
4986 priv->dpsec_id = ls_dev->obj_desc.id;
4988 /* Get a handle for the DPSECI this interface is associated with */
4989 err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4990 if (err) {
4991 dev_err(dev, "dpseci_open() failed: %d\n", err);
4992 goto err_open;
4993 }
4995 err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4996 &priv->minor_ver);
4997 if (err) {
4998 dev_err(dev, "dpseci_get_api_version() failed\n");
4999 goto err_get_vers;
5000 }
5002 dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
5004 if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
5005 err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
5006 if (err) {
5007 dev_err(dev, "dpseci_reset() failed\n");
5008 goto err_get_vers;
5009 }
5010 }
5012 err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
5013 &priv->dpseci_attr);
5014 if (err) {
5015 dev_err(dev, "dpseci_get_attributes() failed\n");
5016 goto err_get_vers;
5017 }
5019 err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
5020 &priv->sec_attr);
5021 if (err) {
5022 dev_err(dev, "dpseci_get_sec_attr() failed\n");
5023 goto err_get_vers;
5024 }
5026 err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
5027 if (err) {
5028 dev_err(dev, "setup_congestion() failed\n");
5029 goto err_get_vers;
5030 }
5032 priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
5033 priv->dpseci_attr.num_tx_queues);
5034 if (priv->num_pairs > num_online_cpus()) {
5035 dev_warn(dev, "%d queues won't be used\n",
5036 priv->num_pairs - num_online_cpus());
5037 priv->num_pairs = num_online_cpus();
5038 }
5040 for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
5041 err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5042 &priv->rx_queue_attr[i]);
5043 if (err) {
5044 dev_err(dev, "dpseci_get_rx_queue() failed\n");
5045 goto err_get_rx_queue;
5046 }
5047 }
5049 for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
5050 err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5051 &priv->tx_queue_attr[i]);
5052 if (err) {
5053 dev_err(dev, "dpseci_get_tx_queue() failed\n");
5054 goto err_get_rx_queue;
5055 }
5056 }
5058 i = 0;
5059 for_each_online_cpu(cpu) {
5060 u8 j;
5062 j = i % priv->num_pairs;
5064 ppriv = per_cpu_ptr(priv->ppriv, cpu);
5065 ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
5067 /*
5068 * Allow all cores to enqueue, while only some of them
5069 * will take part in dequeuing.
5070 */
5071 if (++i > priv->num_pairs)
5072 continue;
5074 ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
5075 ppriv->prio = j;
5077 dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
5078 priv->rx_queue_attr[j].fqid,
5079 priv->tx_queue_attr[j].fqid);
5081 ppriv->net_dev.dev = *dev;
5082 INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
5083 netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
5084 DPAA2_CAAM_NAPI_WEIGHT);
5085 }
5087 return 0;
5089 err_get_rx_queue:
5090 dpaa2_dpseci_congestion_free(priv);
5091 err_get_vers:
5092 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
5093 err_open:
5094 return err;
5095 }
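/*
* Editor's note: Rx/Tx queues are consumed in pairs, capped to the number
* of online CPUs. With illustrative numbers - 8 pairs, 16 online CPUs -
* j = i % num_pairs gives every CPU a Tx FQ to enqueue on, while only the
* first num_pairs CPUs (the ++i > num_pairs test) also get an Rx FQ, a
* NAPI instance and dequeue duty, matching the "all cores enqueue, some
* dequeue" comment above.
*/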
5097 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
5098 {
5099 struct device *dev = priv->dev;
5100 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5101 struct dpaa2_caam_priv_per_cpu *ppriv;
5102 int i;
5104 for (i = 0; i < priv->num_pairs; i++) {
5105 ppriv = per_cpu_ptr(priv->ppriv, i);
5106 napi_enable(&ppriv->napi);
5107 }
5109 return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
5110 }
5112 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
5113 {
5114 struct device *dev = priv->dev;
5115 struct dpaa2_caam_priv_per_cpu *ppriv;
5116 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5117 int i, err = 0, enabled;
5119 err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
5120 if (err) {
5121 dev_err(dev, "dpseci_disable() failed\n");
5122 return err;
5123 }
5125 err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
5126 if (err) {
5127 dev_err(dev, "dpseci_is_enabled() failed\n");
5128 return err;
5129 }
5131 dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5133 for (i = 0; i < priv->num_pairs; i++) {
5134 ppriv = per_cpu_ptr(priv->ppriv, i);
5135 napi_disable(&ppriv->napi);
5136 netif_napi_del(&ppriv->napi);
5137 }
5139 return 0;
5140 }
5142 static struct list_head hash_list;
5144 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5145 {
5146 struct device *dev;
5147 struct dpaa2_caam_priv *priv;
5148 int i, err = 0;
5149 bool registered = false;
5151 /*
5152 * There is no way to get CAAM endianness - there is no direct register
5153 * space access and MC f/w does not provide this attribute.
5154 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
5155 * property.
5156 */
5157 caam_little_end = true;
5159 caam_imx = false;
5161 dev = &dpseci_dev->dev;
5163 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5164 if (!priv)
5165 return -ENOMEM;
5167 dev_set_drvdata(dev, priv);
5169 priv->domain = iommu_get_domain_for_dev(dev);
5171 qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5172 0, SLAB_CACHE_DMA, NULL);
5173 if (!qi_cache) {
5174 dev_err(dev, "Can't allocate SEC cache\n");
5175 return -ENOMEM;
5176 }
5178 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5179 if (err) {
5180 dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5181 goto err_dma_mask;
5182 }
5184 /* Obtain a MC portal */
5185 err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5186 if (err) {
5187 if (err == -ENXIO)
5188 err = -EPROBE_DEFER;
5189 else
5190 dev_err(dev, "MC portal allocation failed\n");
5192 goto err_dma_mask;
5193 }
5195 priv->ppriv = alloc_percpu(*priv->ppriv);
5196 if (!priv->ppriv) {
5197 dev_err(dev, "alloc_percpu() failed\n");
5198 err = -ENOMEM;
5199 goto err_alloc_ppriv;
5200 }
5202 /* DPSECI initialization */
5203 err = dpaa2_dpseci_setup(dpseci_dev);
5204 if (err) {
5205 dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5206 goto err_dpseci_setup;
5207 }
5209 /* DPIO */
5210 err = dpaa2_dpseci_dpio_setup(priv);
5211 if (err) {
5212 dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
5213 goto err_dpio_setup;
5214 }
5216 /* DPSECI binding to DPIO */
5217 err = dpaa2_dpseci_bind(priv);
5218 if (err) {
5219 dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5220 goto err_bind;
5221 }
5223 /* DPSECI enable */
5224 err = dpaa2_dpseci_enable(priv);
5225 if (err) {
5226 dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5227 goto err_bind;
5228 }
5230 dpaa2_dpseci_debugfs_init(priv);
5232 /* register crypto algorithms the device supports */
5233 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5234 struct caam_skcipher_alg *t_alg = driver_algs + i;
5235 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5237 /* Skip DES algorithms if not supported by device */
5238 if (!priv->sec_attr.des_acc_num &&
5239 (alg_sel == OP_ALG_ALGSEL_3DES ||
5240 alg_sel == OP_ALG_ALGSEL_DES))
5241 continue;
5243 /* Skip AES algorithms if not supported by device */
5244 if (!priv->sec_attr.aes_acc_num &&
5245 alg_sel == OP_ALG_ALGSEL_AES)
5246 continue;
5248 /* Skip CHACHA20 algorithms if not supported by device */
5249 if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5250 !priv->sec_attr.ccha_acc_num)
5251 continue;
5253 t_alg->caam.dev = dev;
5254 caam_skcipher_alg_init(t_alg);
5256 err = crypto_register_skcipher(&t_alg->skcipher);
5257 if (err) {
5258 dev_warn(dev, "%s alg registration failed: %d\n",
5259 t_alg->skcipher.base.cra_driver_name, err);
5260 continue;
5261 }
5263 t_alg->registered = true;
5264 registered = true;
5265 }
5267 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5268 struct caam_aead_alg *t_alg = driver_aeads + i;
5269 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5270 OP_ALG_ALGSEL_MASK;
5271 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5272 OP_ALG_ALGSEL_MASK;
5274 /* Skip DES algorithms if not supported by device */
5275 if (!priv->sec_attr.des_acc_num &&
5276 (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5277 c1_alg_sel == OP_ALG_ALGSEL_DES))
5278 continue;
5280 /* Skip AES algorithms if not supported by device */
5281 if (!priv->sec_attr.aes_acc_num &&
5282 c1_alg_sel == OP_ALG_ALGSEL_AES)
5283 continue;
5285 /* Skip CHACHA20 algorithms if not supported by device */
5286 if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5287 !priv->sec_attr.ccha_acc_num)
5288 continue;
5290 /* Skip POLY1305 algorithms if not supported by device */
5291 if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5292 !priv->sec_attr.ptha_acc_num)
5293 continue;
5295 /*
5296 * Skip algorithms requiring message digests
5297 * if MD not supported by device.
5298 */
5299 if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5300 !priv->sec_attr.md_acc_num)
5301 continue;
5303 t_alg->caam.dev = dev;
5304 caam_aead_alg_init(t_alg);
5306 err = crypto_register_aead(&t_alg->aead);
5307 if (err) {
5308 dev_warn(dev, "%s alg registration failed: %d\n",
5309 t_alg->aead.base.cra_driver_name, err);
5310 continue;
5311 }
5313 t_alg->registered = true;
5314 registered = true;
5315 }
5316 if (registered)
5317 dev_info(dev, "algorithms registered in /proc/crypto\n");
5319 /* register hash algorithms the device supports */
5320 INIT_LIST_HEAD(&hash_list);
5322 /*
5323 * Skip registration of any hashing algorithms if MD block
5324 * is not present.
5325 */
5326 if (!priv->sec_attr.md_acc_num)
5327 return 0;
5329 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5330 struct caam_hash_alg *t_alg;
5331 struct caam_hash_template *alg = driver_hash + i;
5333 /* register hmac version */
5334 t_alg = caam_hash_alloc(dev, alg, true);
5335 if (IS_ERR(t_alg)) {
5336 err = PTR_ERR(t_alg);
5337 dev_warn(dev, "%s hash alg allocation failed: %d\n",
5338 alg->hmac_driver_name, err);
5339 continue;
5340 }
5342 err = crypto_register_ahash(&t_alg->ahash_alg);
5343 if (err) {
5344 dev_warn(dev, "%s alg registration failed: %d\n",
5345 t_alg->ahash_alg.halg.base.cra_driver_name,
5346 err);
5347 kfree(t_alg);
5348 } else {
5349 list_add_tail(&t_alg->entry, &hash_list);
5350 }
5352 /* register unkeyed version */
5353 t_alg = caam_hash_alloc(dev, alg, false);
5354 if (IS_ERR(t_alg)) {
5355 err = PTR_ERR(t_alg);
5356 dev_warn(dev, "%s alg allocation failed: %d\n",
5357 alg->driver_name, err);
5358 continue;
5359 }
5361 err = crypto_register_ahash(&t_alg->ahash_alg);
5362 if (err) {
5363 dev_warn(dev, "%s alg registration failed: %d\n",
5364 t_alg->ahash_alg.halg.base.cra_driver_name,
5365 err);
5366 kfree(t_alg);
5367 } else {
5368 list_add_tail(&t_alg->entry, &hash_list);
5369 }
5370 }
5371 if (!list_empty(&hash_list))
5372 dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5374 return err;
5376 err_bind:
5377 dpaa2_dpseci_dpio_free(priv);
5378 err_dpio_setup:
5379 dpaa2_dpseci_free(priv);
5380 err_dpseci_setup:
5381 free_percpu(priv->ppriv);
5382 err_alloc_ppriv:
5383 fsl_mc_portal_free(priv->mc_io);
5384 err_dma_mask:
5385 kmem_cache_destroy(qi_cache);
5387 return err;
5390 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5391 {
5392 struct device *dev;
5393 struct dpaa2_caam_priv *priv;
5394 int i;
5396 dev = &ls_dev->dev;
5397 priv = dev_get_drvdata(dev);
5399 dpaa2_dpseci_debugfs_exit(priv);
5401 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5402 struct caam_aead_alg *t_alg = driver_aeads + i;
5404 if (t_alg->registered)
5405 crypto_unregister_aead(&t_alg->aead);
5406 }
5408 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5409 struct caam_skcipher_alg *t_alg = driver_algs + i;
5411 if (t_alg->registered)
5412 crypto_unregister_skcipher(&t_alg->skcipher);
5413 }
5415 if (hash_list.next) {
5416 struct caam_hash_alg *t_hash_alg, *p;
5418 list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5419 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5420 list_del(&t_hash_alg->entry);
5421 kfree(t_hash_alg);
5422 }
5423 }
5425 dpaa2_dpseci_disable(priv);
5426 dpaa2_dpseci_dpio_free(priv);
5427 dpaa2_dpseci_free(priv);
5428 free_percpu(priv->ppriv);
5429 fsl_mc_portal_free(priv->mc_io);
5430 kmem_cache_destroy(qi_cache);
5432 return 0;
5433 }
5435 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5436 {
5437 struct dpaa2_fd fd;
5438 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5439 struct dpaa2_caam_priv_per_cpu *ppriv;
5440 int err = 0, i;
5442 if (IS_ERR(req))
5443 return PTR_ERR(req);
5445 if (priv->cscn_mem) {
5446 dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5447 DPAA2_CSCN_SIZE,
5448 DMA_FROM_DEVICE);
5449 if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5450 dev_dbg_ratelimited(dev, "Dropping request\n");
5451 return -EBUSY;
5452 }
5453 }
5455 dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5457 req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5458 DMA_BIDIRECTIONAL);
5459 if (dma_mapping_error(dev, req->fd_flt_dma)) {
5460 dev_err(dev, "DMA mapping error for QI enqueue request\n");
5461 goto err_out;
5462 }
5464 memset(&fd, 0, sizeof(fd));
5465 dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5466 dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5467 dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5468 dpaa2_fd_set_flc(&fd, req->flc_dma);
5470 ppriv = this_cpu_ptr(priv->ppriv);
5471 for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5472 err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5473 &fd);
5474 if (err != -EBUSY)
5475 break;
5477 cpu_relax();
5478 }
5480 if (unlikely(err)) {
5481 dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5482 goto err_out;
5483 }
5485 return -EINPROGRESS;
5487 err_out:
5488 dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5489 DMA_BIDIRECTIONAL);
5490 return -EIO;
5491 }
5492 EXPORT_SYMBOL(dpaa2_caam_enqueue);
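/*
* Editor's note: a minimal sketch of what a caller (such as the ahash
* routines above) fills in before invoking dpaa2_caam_enqueue(); variable
* names are hypothetical:
*
* struct caam_request *req_ctx = &state->caam_req;
*
* // in/out frame list entries, built with the dpaa2_fl_set_* helpers
* memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
* req_ctx->flc = &ctx->flc[DIGEST]; // flow context to execute
* req_ctx->flc_dma = ctx->flc_dma[DIGEST]; // and its IOVA
* req_ctx->cbk = ahash_done; // completion callback
* req_ctx->ctx = &req->base; // cookie handed back to cbk
* req_ctx->edesc = edesc; // freed in the callback
*
* ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
* // -EINPROGRESS: accepted; -EBUSY: congested (may be backlogged);
* // any other value: the caller unmaps and frees edesc itself.
*/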
5494 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5495 {
5496 .vendor = FSL_MC_VENDOR_FREESCALE,
5497 .obj_type = "dpseci",
5498 },
5499 { .vendor = 0x0 }
5500 };
5501 MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
5503 static struct fsl_mc_driver dpaa2_caam_driver = {
5504 .driver = {
5505 .name = KBUILD_MODNAME,
5506 .owner = THIS_MODULE,
5507 },
5508 .probe = dpaa2_caam_probe,
5509 .remove = dpaa2_caam_remove,
5510 .match_id_table = dpaa2_caam_match_id_table
5511 };
5513 MODULE_LICENSE("Dual BSD/GPL");
5514 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5515 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5517 module_fsl_mc_driver(dpaa2_caam_driver);