drivers/crypto/caam/caamalg_qi2.c (Linux 5.1.15)
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3 * Copyright 2015-2016 Freescale Semiconductor Inc.
4 * Copyright 2017-2018 NXP
5 */
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include <linux/fsl/mc.h>
19 #include <soc/fsl/dpaa2-io.h>
20 #include <soc/fsl/dpaa2-fd.h>
22 #define CAAM_CRA_PRIORITY 2000
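/*
 * Note: CAAM_CRA_PRIORITY is set high on purpose, so that once the
 * "-caam-qi2" implementations below are registered, the crypto API picks
 * them over purely software implementations, which typically register with
 * priorities in the low hundreds.
 */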
24 /* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size and max split key size */
25 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
26 SHA512_DIGEST_SIZE * 2)
28 /*
29 * This is a cache of buffers, from which the users of CAAM QI driver
30 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
31 * NOTE: A more elegant solution would be to have some headroom in the frames
32 *       being processed. This can be added by the dpaa2-eth driver. This would
33 *       pose a problem for userspace application processing which cannot
34 *       know of this limitation. So for now, this will work.
35 * NOTE: The memcache is SMP-safe. No need to handle spinlocks here.
36 */
37 static struct kmem_cache *qi_cache;
39 struct caam_alg_entry {
40 struct device *dev;
41 int class1_alg_type;
42 int class2_alg_type;
43 bool rfc3686;
44 bool geniv;
47 struct caam_aead_alg {
48 struct aead_alg aead;
49 struct caam_alg_entry caam;
50 bool registered;
53 struct caam_skcipher_alg {
54 struct skcipher_alg skcipher;
55 struct caam_alg_entry caam;
56 bool registered;
59 /**
60 * caam_ctx - per-session context
61 * @flc: Flow Contexts array
62 * @key: [authentication key], encryption key
63 * @flc_dma: I/O virtual addresses of the Flow Contexts
64 * @key_dma: I/O virtual address of the key
65 * @dir: DMA direction for mapping key and Flow Contexts
66 * @dev: dpseci device
67 * @adata: authentication algorithm details
68 * @cdata: encryption algorithm details
69 * @authsize: authentication tag (a.k.a. ICV / MAC) size
70 */
71 struct caam_ctx {
72 struct caam_flc flc[NUM_OP];
73 u8 key[CAAM_MAX_KEY_SIZE];
74 dma_addr_t flc_dma[NUM_OP];
75 dma_addr_t key_dma;
76 enum dma_data_direction dir;
77 struct device *dev;
78 struct alginfo adata;
79 struct alginfo cdata;
80 unsigned int authsize;
83 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
84 dma_addr_t iova_addr)
86 phys_addr_t phys_addr;
88 phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
89 iova_addr;
91 return phys_to_virt(phys_addr);
94 /**
95 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
97 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
98 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
99 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
100 * hosting 16 SG entries.
102 * @flags - flags that would be used for the equivalent kmalloc(..) call
104 * Returns a pointer to a retrieved buffer on success or NULL on failure.
105 */
106 static inline void *qi_cache_zalloc(gfp_t flags)
108 return kmem_cache_zalloc(qi_cache, flags);
111 /**
112 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
114 * @obj - buffer previously allocated by qi_cache_zalloc
116 * No checking is being done, the call is a passthrough call to
117 * kmem_cache_free(...)
118 */
119 static inline void qi_cache_free(void *obj)
121 kmem_cache_free(qi_cache, obj);
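/*
 * Illustrative sketch of how the two cache helpers above are meant to be
 * used (this mirrors what the *_edesc_alloc() functions below do; the GFP
 * flags shown are just an example):
 *
 *	struct aead_edesc *edesc;
 *
 *	edesc = qi_cache_zalloc(GFP_DMA | GFP_ATOMIC);
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	... build the extended descriptor and enqueue the request ...
 *	qi_cache_free(edesc);	(from the completion callback, or on error)
 */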
124 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
126 switch (crypto_tfm_alg_type(areq->tfm)) {
127 case CRYPTO_ALG_TYPE_SKCIPHER:
128 return skcipher_request_ctx(skcipher_request_cast(areq));
129 case CRYPTO_ALG_TYPE_AEAD:
130 return aead_request_ctx(container_of(areq, struct aead_request,
131 base));
132 case CRYPTO_ALG_TYPE_AHASH:
133 return ahash_request_ctx(ahash_request_cast(areq));
134 default:
135 return ERR_PTR(-EINVAL);
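/*
 * to_caam_req() relies on every transform type serviced by this driver
 * reserving sizeof(struct caam_request) as its request context; see the
 * crypto_skcipher_set_reqsize()/crypto_aead_set_reqsize() calls in the
 * caam_cra_init_*() functions below (the hash transforms are set up the
 * same way elsewhere in this file).
 */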
139 static void caam_unmap(struct device *dev, struct scatterlist *src,
140 struct scatterlist *dst, int src_nents,
141 int dst_nents, dma_addr_t iv_dma, int ivsize,
142 dma_addr_t qm_sg_dma, int qm_sg_bytes)
144 if (dst != src) {
145 if (src_nents)
146 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
147 if (dst_nents)
148 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
149 } else {
150 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
153 if (iv_dma)
154 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
156 if (qm_sg_bytes)
157 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
160 static int aead_set_sh_desc(struct crypto_aead *aead)
162 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
163 typeof(*alg), aead);
164 struct caam_ctx *ctx = crypto_aead_ctx(aead);
165 unsigned int ivsize = crypto_aead_ivsize(aead);
166 struct device *dev = ctx->dev;
167 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
168 struct caam_flc *flc;
169 u32 *desc;
170 u32 ctx1_iv_off = 0;
171 u32 *nonce = NULL;
172 unsigned int data_len[2];
173 u32 inl_mask;
174 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
175 OP_ALG_AAI_CTR_MOD128);
176 const bool is_rfc3686 = alg->caam.rfc3686;
178 if (!ctx->cdata.keylen || !ctx->authsize)
179 return 0;
181 /*
182 * AES-CTR needs to load IV in CONTEXT1 reg
183 * at an offset of 128 bits (16 bytes)
184 * CONTEXT1[255:128] = IV
185 */
186 if (ctr_mode)
187 ctx1_iv_off = 16;
190 * RFC3686 specific:
191 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
193 if (is_rfc3686) {
194 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
195 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
196 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
199 data_len[0] = ctx->adata.keylen_pad;
200 data_len[1] = ctx->cdata.keylen;
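/*
 * desc_inline_query() below reports, via inl_mask, which of the two keys
 * still fits inline in the shared descriptor for the requested descriptor
 * length: bit 0 stands for the split authentication key, bit 1 for the
 * encryption key. A key that does not fit is referenced through its DMA
 * address instead of being copied into the descriptor.
 */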
202 /* aead_encrypt shared descriptor */
203 if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
204 DESC_QI_AEAD_ENC_LEN) +
205 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
206 DESC_JOB_IO_LEN, data_len, &inl_mask,
207 ARRAY_SIZE(data_len)) < 0)
208 return -EINVAL;
210 if (inl_mask & 1)
211 ctx->adata.key_virt = ctx->key;
212 else
213 ctx->adata.key_dma = ctx->key_dma;
215 if (inl_mask & 2)
216 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
217 else
218 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
220 ctx->adata.key_inline = !!(inl_mask & 1);
221 ctx->cdata.key_inline = !!(inl_mask & 2);
223 flc = &ctx->flc[ENCRYPT];
224 desc = flc->sh_desc;
226 if (alg->caam.geniv)
227 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
228 ivsize, ctx->authsize, is_rfc3686,
229 nonce, ctx1_iv_off, true,
230 priv->sec_attr.era);
231 else
232 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
233 ivsize, ctx->authsize, is_rfc3686, nonce,
234 ctx1_iv_off, true, priv->sec_attr.era);
236 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
237 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
238 sizeof(flc->flc) + desc_bytes(desc),
239 ctx->dir);
241 /* aead_decrypt shared descriptor */
242 if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
243 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
244 DESC_JOB_IO_LEN, data_len, &inl_mask,
245 ARRAY_SIZE(data_len)) < 0)
246 return -EINVAL;
248 if (inl_mask & 1)
249 ctx->adata.key_virt = ctx->key;
250 else
251 ctx->adata.key_dma = ctx->key_dma;
253 if (inl_mask & 2)
254 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
255 else
256 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
258 ctx->adata.key_inline = !!(inl_mask & 1);
259 ctx->cdata.key_inline = !!(inl_mask & 2);
261 flc = &ctx->flc[DECRYPT];
262 desc = flc->sh_desc;
263 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
264 ivsize, ctx->authsize, alg->caam.geniv,
265 is_rfc3686, nonce, ctx1_iv_off, true,
266 priv->sec_attr.era);
267 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
268 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
269 sizeof(flc->flc) + desc_bytes(desc),
270 ctx->dir);
272 return 0;
275 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
277 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
279 ctx->authsize = authsize;
280 aead_set_sh_desc(authenc);
282 return 0;
285 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
286 unsigned int keylen)
288 struct caam_ctx *ctx = crypto_aead_ctx(aead);
289 struct device *dev = ctx->dev;
290 struct crypto_authenc_keys keys;
292 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
293 goto badkey;
295 dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
296 keys.authkeylen + keys.enckeylen, keys.enckeylen,
297 keys.authkeylen);
298 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
299 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
301 ctx->adata.keylen = keys.authkeylen;
302 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
303 OP_ALG_ALGSEL_MASK);
305 if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
306 goto badkey;
308 memcpy(ctx->key, keys.authkey, keys.authkeylen);
309 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
310 dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
311 keys.enckeylen, ctx->dir);
312 print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
313 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
314 ctx->adata.keylen_pad + keys.enckeylen, 1);
316 ctx->cdata.keylen = keys.enckeylen;
318 memzero_explicit(&keys, sizeof(keys));
319 return aead_set_sh_desc(aead);
320 badkey:
321 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
322 memzero_explicit(&keys, sizeof(keys));
323 return -EINVAL;
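/*
 * Layout of ctx->key after aead_setkey(), as built by the two memcpy()
 * calls above:
 *
 *	| split authentication key (adata.keylen_pad bytes) | encryption key |
 *
 * For rfc3686 transforms the last CTR_RFC3686_NONCE_SIZE bytes of the
 * encryption key hold the nonce (see aead_set_sh_desc() above).
 */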
326 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
327 bool encrypt)
329 struct crypto_aead *aead = crypto_aead_reqtfm(req);
330 struct caam_request *req_ctx = aead_request_ctx(req);
331 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
332 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
333 struct caam_ctx *ctx = crypto_aead_ctx(aead);
334 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
335 typeof(*alg), aead);
336 struct device *dev = ctx->dev;
337 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
338 GFP_KERNEL : GFP_ATOMIC;
339 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
340 struct aead_edesc *edesc;
341 dma_addr_t qm_sg_dma, iv_dma = 0;
342 int ivsize = 0;
343 unsigned int authsize = ctx->authsize;
344 int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
345 int in_len, out_len;
346 struct dpaa2_sg_entry *sg_table;
348 /* allocate space for base edesc, link tables and IV */
349 edesc = qi_cache_zalloc(GFP_DMA | flags);
350 if (unlikely(!edesc)) {
351 dev_err(dev, "could not allocate extended descriptor\n");
352 return ERR_PTR(-ENOMEM);
355 if (unlikely(req->dst != req->src)) {
356 src_nents = sg_nents_for_len(req->src, req->assoclen +
357 req->cryptlen);
358 if (unlikely(src_nents < 0)) {
359 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
360 req->assoclen + req->cryptlen);
361 qi_cache_free(edesc);
362 return ERR_PTR(src_nents);
365 dst_nents = sg_nents_for_len(req->dst, req->assoclen +
366 req->cryptlen +
367 (encrypt ? authsize :
368 (-authsize)));
369 if (unlikely(dst_nents < 0)) {
370 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
371 req->assoclen + req->cryptlen +
372 (encrypt ? authsize : (-authsize)));
373 qi_cache_free(edesc);
374 return ERR_PTR(dst_nents);
377 if (src_nents) {
378 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
379 DMA_TO_DEVICE);
380 if (unlikely(!mapped_src_nents)) {
381 dev_err(dev, "unable to map source\n");
382 qi_cache_free(edesc);
383 return ERR_PTR(-ENOMEM);
385 } else {
386 mapped_src_nents = 0;
389 if (dst_nents) {
390 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
391 DMA_FROM_DEVICE);
392 if (unlikely(!mapped_dst_nents)) {
393 dev_err(dev, "unable to map destination\n");
394 dma_unmap_sg(dev, req->src, src_nents,
395 DMA_TO_DEVICE);
396 qi_cache_free(edesc);
397 return ERR_PTR(-ENOMEM);
399 } else {
400 mapped_dst_nents = 0;
402 } else {
403 src_nents = sg_nents_for_len(req->src, req->assoclen +
404 req->cryptlen +
405 (encrypt ? authsize : 0));
406 if (unlikely(src_nents < 0)) {
407 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
408 req->assoclen + req->cryptlen +
409 (encrypt ? authsize : 0));
410 qi_cache_free(edesc);
411 return ERR_PTR(src_nents);
414 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
415 DMA_BIDIRECTIONAL);
416 if (unlikely(!mapped_src_nents)) {
417 dev_err(dev, "unable to map source\n");
418 qi_cache_free(edesc);
419 return ERR_PTR(-ENOMEM);
423 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
424 ivsize = crypto_aead_ivsize(aead);
427 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
428 * Input is not contiguous.
430 qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
431 (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
432 sg_table = &edesc->sgt[0];
433 qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
434 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
435 CAAM_QI_MEMCACHE_SIZE)) {
436 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
437 qm_sg_nents, ivsize);
438 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
439 0, 0, 0);
440 qi_cache_free(edesc);
441 return ERR_PTR(-ENOMEM);
444 if (ivsize) {
445 u8 *iv = (u8 *)(sg_table + qm_sg_nents);
447 /* Make sure IV is located in a DMAable area */
448 memcpy(iv, req->iv, ivsize);
450 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
451 if (dma_mapping_error(dev, iv_dma)) {
452 dev_err(dev, "unable to map IV\n");
453 caam_unmap(dev, req->src, req->dst, src_nents,
454 dst_nents, 0, 0, 0, 0);
455 qi_cache_free(edesc);
456 return ERR_PTR(-ENOMEM);
460 edesc->src_nents = src_nents;
461 edesc->dst_nents = dst_nents;
462 edesc->iv_dma = iv_dma;
464 if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
465 OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
466 /*
467 * The associated data comes already with the IV but we need
468 * to skip it when we authenticate or encrypt...
469 */
470 edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
471 else
472 edesc->assoclen = cpu_to_caam32(req->assoclen);
473 edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
474 DMA_TO_DEVICE);
475 if (dma_mapping_error(dev, edesc->assoclen_dma)) {
476 dev_err(dev, "unable to map assoclen\n");
477 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
478 iv_dma, ivsize, 0, 0);
479 qi_cache_free(edesc);
480 return ERR_PTR(-ENOMEM);
483 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
484 qm_sg_index++;
485 if (ivsize) {
486 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
487 qm_sg_index++;
489 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
490 qm_sg_index += mapped_src_nents;
492 if (mapped_dst_nents > 1)
493 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
494 qm_sg_index, 0);
496 qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
497 if (dma_mapping_error(dev, qm_sg_dma)) {
498 dev_err(dev, "unable to map S/G table\n");
499 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
500 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
501 iv_dma, ivsize, 0, 0);
502 qi_cache_free(edesc);
503 return ERR_PTR(-ENOMEM);
506 edesc->qm_sg_dma = qm_sg_dma;
507 edesc->qm_sg_bytes = qm_sg_bytes;
509 out_len = req->assoclen + req->cryptlen +
510 (encrypt ? ctx->authsize : (-ctx->authsize));
511 in_len = 4 + ivsize + req->assoclen + req->cryptlen;
513 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
514 dpaa2_fl_set_final(in_fle, true);
515 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
516 dpaa2_fl_set_addr(in_fle, qm_sg_dma);
517 dpaa2_fl_set_len(in_fle, in_len);
519 if (req->dst == req->src) {
520 if (mapped_src_nents == 1) {
521 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
522 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
523 } else {
524 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
525 dpaa2_fl_set_addr(out_fle, qm_sg_dma +
526 (1 + !!ivsize) * sizeof(*sg_table));
528 } else if (mapped_dst_nents == 1) {
529 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
530 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
531 } else {
532 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
533 dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
534 sizeof(*sg_table));
537 dpaa2_fl_set_len(out_fle, out_len);
539 return edesc;
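/*
 * Summary of what aead_edesc_alloc() builds: the input frame list entry
 * points at a QM S/G table laid out as
 *
 *	[ assoclen (4 bytes) ][ IV, if any ][ src entries ][ dst entries ]
 *
 * where the dst entries are present only for a separate, multi-entry
 * destination. The output frame list entry references either a single
 * mapped buffer or an offset into the same table: past the assoclen/IV
 * entries when operating in place, or starting at qm_sg_index otherwise.
 */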
542 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
544 struct caam_ctx *ctx = crypto_aead_ctx(aead);
545 unsigned int ivsize = crypto_aead_ivsize(aead);
546 struct device *dev = ctx->dev;
547 struct caam_flc *flc;
548 u32 *desc;
550 if (!ctx->cdata.keylen || !ctx->authsize)
551 return 0;
553 flc = &ctx->flc[ENCRYPT];
554 desc = flc->sh_desc;
555 cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
556 ctx->authsize, true, true);
557 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
558 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
559 sizeof(flc->flc) + desc_bytes(desc),
560 ctx->dir);
562 flc = &ctx->flc[DECRYPT];
563 desc = flc->sh_desc;
564 cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
565 ctx->authsize, false, true);
566 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
567 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
568 sizeof(flc->flc) + desc_bytes(desc),
569 ctx->dir);
571 return 0;
574 static int chachapoly_setauthsize(struct crypto_aead *aead,
575 unsigned int authsize)
577 struct caam_ctx *ctx = crypto_aead_ctx(aead);
579 if (authsize != POLY1305_DIGEST_SIZE)
580 return -EINVAL;
582 ctx->authsize = authsize;
583 return chachapoly_set_sh_desc(aead);
586 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
587 unsigned int keylen)
589 struct caam_ctx *ctx = crypto_aead_ctx(aead);
590 unsigned int ivsize = crypto_aead_ivsize(aead);
591 unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
593 if (keylen != CHACHA_KEY_SIZE + saltlen) {
594 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
595 return -EINVAL;
598 ctx->cdata.key_virt = key;
599 ctx->cdata.keylen = keylen - saltlen;
601 return chachapoly_set_sh_desc(aead);
604 static int gcm_set_sh_desc(struct crypto_aead *aead)
606 struct caam_ctx *ctx = crypto_aead_ctx(aead);
607 struct device *dev = ctx->dev;
608 unsigned int ivsize = crypto_aead_ivsize(aead);
609 struct caam_flc *flc;
610 u32 *desc;
611 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
612 ctx->cdata.keylen;
614 if (!ctx->cdata.keylen || !ctx->authsize)
615 return 0;
618 * AES GCM encrypt shared descriptor
619 * Job Descriptor and Shared Descriptor
620 * must fit into the 64-word Descriptor h/w Buffer
622 if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
623 ctx->cdata.key_inline = true;
624 ctx->cdata.key_virt = ctx->key;
625 } else {
626 ctx->cdata.key_inline = false;
627 ctx->cdata.key_dma = ctx->key_dma;
630 flc = &ctx->flc[ENCRYPT];
631 desc = flc->sh_desc;
632 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
633 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
634 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
635 sizeof(flc->flc) + desc_bytes(desc),
636 ctx->dir);
639 * Job Descriptor and Shared Descriptors
640 * must all fit into the 64-word Descriptor h/w Buffer
642 if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
643 ctx->cdata.key_inline = true;
644 ctx->cdata.key_virt = ctx->key;
645 } else {
646 ctx->cdata.key_inline = false;
647 ctx->cdata.key_dma = ctx->key_dma;
650 flc = &ctx->flc[DECRYPT];
651 desc = flc->sh_desc;
652 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
653 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
654 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
655 sizeof(flc->flc) + desc_bytes(desc),
656 ctx->dir);
658 return 0;
661 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
663 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
665 ctx->authsize = authsize;
666 gcm_set_sh_desc(authenc);
668 return 0;
671 static int gcm_setkey(struct crypto_aead *aead,
672 const u8 *key, unsigned int keylen)
674 struct caam_ctx *ctx = crypto_aead_ctx(aead);
675 struct device *dev = ctx->dev;
677 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
678 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
680 memcpy(ctx->key, key, keylen);
681 dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
682 ctx->cdata.keylen = keylen;
684 return gcm_set_sh_desc(aead);
687 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
689 struct caam_ctx *ctx = crypto_aead_ctx(aead);
690 struct device *dev = ctx->dev;
691 unsigned int ivsize = crypto_aead_ivsize(aead);
692 struct caam_flc *flc;
693 u32 *desc;
694 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
695 ctx->cdata.keylen;
697 if (!ctx->cdata.keylen || !ctx->authsize)
698 return 0;
700 ctx->cdata.key_virt = ctx->key;
703 * RFC4106 encrypt shared descriptor
704 * Job Descriptor and Shared Descriptor
705 * must fit into the 64-word Descriptor h/w Buffer
707 if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
708 ctx->cdata.key_inline = true;
709 } else {
710 ctx->cdata.key_inline = false;
711 ctx->cdata.key_dma = ctx->key_dma;
714 flc = &ctx->flc[ENCRYPT];
715 desc = flc->sh_desc;
716 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
717 true);
718 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
719 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
720 sizeof(flc->flc) + desc_bytes(desc),
721 ctx->dir);
724 * Job Descriptor and Shared Descriptors
725 * must all fit into the 64-word Descriptor h/w Buffer
727 if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
728 ctx->cdata.key_inline = true;
729 } else {
730 ctx->cdata.key_inline = false;
731 ctx->cdata.key_dma = ctx->key_dma;
734 flc = &ctx->flc[DECRYPT];
735 desc = flc->sh_desc;
736 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
737 true);
738 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
739 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
740 sizeof(flc->flc) + desc_bytes(desc),
741 ctx->dir);
743 return 0;
746 static int rfc4106_setauthsize(struct crypto_aead *authenc,
747 unsigned int authsize)
749 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
751 ctx->authsize = authsize;
752 rfc4106_set_sh_desc(authenc);
754 return 0;
757 static int rfc4106_setkey(struct crypto_aead *aead,
758 const u8 *key, unsigned int keylen)
760 struct caam_ctx *ctx = crypto_aead_ctx(aead);
761 struct device *dev = ctx->dev;
763 if (keylen < 4)
764 return -EINVAL;
766 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
767 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
769 memcpy(ctx->key, key, keylen);
771 * The last four bytes of the key material are used as the salt value
772 * in the nonce. Update the AES key length.
774 ctx->cdata.keylen = keylen - 4;
775 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
776 ctx->dir);
778 return rfc4106_set_sh_desc(aead);
781 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
783 struct caam_ctx *ctx = crypto_aead_ctx(aead);
784 struct device *dev = ctx->dev;
785 unsigned int ivsize = crypto_aead_ivsize(aead);
786 struct caam_flc *flc;
787 u32 *desc;
788 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
789 ctx->cdata.keylen;
791 if (!ctx->cdata.keylen || !ctx->authsize)
792 return 0;
794 ctx->cdata.key_virt = ctx->key;
797 * RFC4543 encrypt shared descriptor
798 * Job Descriptor and Shared Descriptor
799 * must fit into the 64-word Descriptor h/w Buffer
801 if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
802 ctx->cdata.key_inline = true;
803 } else {
804 ctx->cdata.key_inline = false;
805 ctx->cdata.key_dma = ctx->key_dma;
808 flc = &ctx->flc[ENCRYPT];
809 desc = flc->sh_desc;
810 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
811 true);
812 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
813 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
814 sizeof(flc->flc) + desc_bytes(desc),
815 ctx->dir);
818 * Job Descriptor and Shared Descriptors
819 * must all fit into the 64-word Descriptor h/w Buffer
821 if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
822 ctx->cdata.key_inline = true;
823 } else {
824 ctx->cdata.key_inline = false;
825 ctx->cdata.key_dma = ctx->key_dma;
828 flc = &ctx->flc[DECRYPT];
829 desc = flc->sh_desc;
830 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
831 true);
832 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
833 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
834 sizeof(flc->flc) + desc_bytes(desc),
835 ctx->dir);
837 return 0;
840 static int rfc4543_setauthsize(struct crypto_aead *authenc,
841 unsigned int authsize)
843 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
845 ctx->authsize = authsize;
846 rfc4543_set_sh_desc(authenc);
848 return 0;
851 static int rfc4543_setkey(struct crypto_aead *aead,
852 const u8 *key, unsigned int keylen)
854 struct caam_ctx *ctx = crypto_aead_ctx(aead);
855 struct device *dev = ctx->dev;
857 if (keylen < 4)
858 return -EINVAL;
860 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
861 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
863 memcpy(ctx->key, key, keylen);
865 * The last four bytes of the key material are used as the salt value
866 * in the nonce. Update the AES key length.
868 ctx->cdata.keylen = keylen - 4;
869 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
870 ctx->dir);
872 return rfc4543_set_sh_desc(aead);
875 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
876 unsigned int keylen)
878 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
879 struct caam_skcipher_alg *alg =
880 container_of(crypto_skcipher_alg(skcipher),
881 struct caam_skcipher_alg, skcipher);
882 struct device *dev = ctx->dev;
883 struct caam_flc *flc;
884 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
885 u32 *desc;
886 u32 ctx1_iv_off = 0;
887 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
888 OP_ALG_AAI_CTR_MOD128) &&
889 ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
890 OP_ALG_ALGSEL_CHACHA20);
891 const bool is_rfc3686 = alg->caam.rfc3686;
893 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
894 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
896 /*
897 * AES-CTR needs to load IV in CONTEXT1 reg
898 * at an offset of 128 bits (16 bytes)
899 * CONTEXT1[255:128] = IV
900 */
901 if (ctr_mode)
902 ctx1_iv_off = 16;
905 * RFC3686 specific:
906 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
907 * | *key = {KEY, NONCE}
909 if (is_rfc3686) {
910 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
911 keylen -= CTR_RFC3686_NONCE_SIZE;
914 ctx->cdata.keylen = keylen;
915 ctx->cdata.key_virt = key;
916 ctx->cdata.key_inline = true;
918 /* skcipher_encrypt shared descriptor */
919 flc = &ctx->flc[ENCRYPT];
920 desc = flc->sh_desc;
921 cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
922 ctx1_iv_off);
923 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
924 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
925 sizeof(flc->flc) + desc_bytes(desc),
926 ctx->dir);
928 /* skcipher_decrypt shared descriptor */
929 flc = &ctx->flc[DECRYPT];
930 desc = flc->sh_desc;
931 cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
932 ctx1_iv_off);
933 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
934 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
935 sizeof(flc->flc) + desc_bytes(desc),
936 ctx->dir);
938 return 0;
941 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
942 unsigned int keylen)
944 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
945 struct device *dev = ctx->dev;
946 struct caam_flc *flc;
947 u32 *desc;
949 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
950 dev_err(dev, "key size mismatch\n");
951 crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
952 return -EINVAL;
955 ctx->cdata.keylen = keylen;
956 ctx->cdata.key_virt = key;
957 ctx->cdata.key_inline = true;
959 /* xts_skcipher_encrypt shared descriptor */
960 flc = &ctx->flc[ENCRYPT];
961 desc = flc->sh_desc;
962 cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
963 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
964 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
965 sizeof(flc->flc) + desc_bytes(desc),
966 ctx->dir);
968 /* xts_skcipher_decrypt shared descriptor */
969 flc = &ctx->flc[DECRYPT];
970 desc = flc->sh_desc;
971 cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
972 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
973 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
974 sizeof(flc->flc) + desc_bytes(desc),
975 ctx->dir);
977 return 0;
980 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
982 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
983 struct caam_request *req_ctx = skcipher_request_ctx(req);
984 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
985 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
986 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
987 struct device *dev = ctx->dev;
988 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
989 GFP_KERNEL : GFP_ATOMIC;
990 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
991 struct skcipher_edesc *edesc;
992 dma_addr_t iv_dma;
993 u8 *iv;
994 int ivsize = crypto_skcipher_ivsize(skcipher);
995 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
996 struct dpaa2_sg_entry *sg_table;
998 src_nents = sg_nents_for_len(req->src, req->cryptlen);
999 if (unlikely(src_nents < 0)) {
1000 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1001 req->cryptlen);
1002 return ERR_PTR(src_nents);
1005 if (unlikely(req->dst != req->src)) {
1006 dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1007 if (unlikely(dst_nents < 0)) {
1008 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1009 req->cryptlen);
1010 return ERR_PTR(dst_nents);
1013 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1014 DMA_TO_DEVICE);
1015 if (unlikely(!mapped_src_nents)) {
1016 dev_err(dev, "unable to map source\n");
1017 return ERR_PTR(-ENOMEM);
1020 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1021 DMA_FROM_DEVICE);
1022 if (unlikely(!mapped_dst_nents)) {
1023 dev_err(dev, "unable to map destination\n");
1024 dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1025 return ERR_PTR(-ENOMEM);
1027 } else {
1028 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1029 DMA_BIDIRECTIONAL);
1030 if (unlikely(!mapped_src_nents)) {
1031 dev_err(dev, "unable to map source\n");
1032 return ERR_PTR(-ENOMEM);
1036 qm_sg_ents = 1 + mapped_src_nents;
1037 dst_sg_idx = qm_sg_ents;
1039 qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1040 qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1041 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1042 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1043 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1044 qm_sg_ents, ivsize);
1045 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1046 0, 0, 0);
1047 return ERR_PTR(-ENOMEM);
1050 /* allocate space for base edesc, link tables and IV */
1051 edesc = qi_cache_zalloc(GFP_DMA | flags);
1052 if (unlikely(!edesc)) {
1053 dev_err(dev, "could not allocate extended descriptor\n");
1054 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1055 0, 0, 0);
1056 return ERR_PTR(-ENOMEM);
1059 /* Make sure IV is located in a DMAable area */
1060 sg_table = &edesc->sgt[0];
1061 iv = (u8 *)(sg_table + qm_sg_ents);
1062 memcpy(iv, req->iv, ivsize);
1064 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1065 if (dma_mapping_error(dev, iv_dma)) {
1066 dev_err(dev, "unable to map IV\n");
1067 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1068 0, 0, 0);
1069 qi_cache_free(edesc);
1070 return ERR_PTR(-ENOMEM);
1073 edesc->src_nents = src_nents;
1074 edesc->dst_nents = dst_nents;
1075 edesc->iv_dma = iv_dma;
1076 edesc->qm_sg_bytes = qm_sg_bytes;
1078 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1079 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1081 if (mapped_dst_nents > 1)
1082 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1083 dst_sg_idx, 0);
1085 edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1086 DMA_TO_DEVICE);
1087 if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1088 dev_err(dev, "unable to map S/G table\n");
1089 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1090 iv_dma, ivsize, 0, 0);
1091 qi_cache_free(edesc);
1092 return ERR_PTR(-ENOMEM);
1095 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1096 dpaa2_fl_set_final(in_fle, true);
1097 dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1098 dpaa2_fl_set_len(out_fle, req->cryptlen);
1100 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1101 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1103 if (req->src == req->dst) {
1104 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1105 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1106 sizeof(*sg_table));
1107 } else if (mapped_dst_nents > 1) {
1108 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1109 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1110 sizeof(*sg_table));
1111 } else {
1112 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
1113 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
1116 return edesc;
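/*
 * Summary of what skcipher_edesc_alloc() builds: the input S/G table is
 *
 *	[ IV ][ src entries ][ dst entries at dst_sg_idx, if needed ]
 *
 * The input frame covers cryptlen + ivsize bytes; the output frame covers
 * cryptlen bytes and points either into the table (in-place or multi-entry
 * destination) or directly at a single mapped destination buffer.
 */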
1119 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1120 struct aead_request *req)
1122 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1123 int ivsize = crypto_aead_ivsize(aead);
1125 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1126 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
1127 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1130 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1131 struct skcipher_request *req)
1133 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1134 int ivsize = crypto_skcipher_ivsize(skcipher);
1136 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1137 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
1140 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1142 struct crypto_async_request *areq = cbk_ctx;
1143 struct aead_request *req = container_of(areq, struct aead_request,
1144 base);
1145 struct caam_request *req_ctx = to_caam_req(areq);
1146 struct aead_edesc *edesc = req_ctx->edesc;
1147 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1148 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1149 int ecode = 0;
1151 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1153 if (unlikely(status)) {
1154 caam_qi2_strstatus(ctx->dev, status);
1155 ecode = -EIO;
1158 aead_unmap(ctx->dev, edesc, req);
1159 qi_cache_free(edesc);
1160 aead_request_complete(req, ecode);
1163 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1165 struct crypto_async_request *areq = cbk_ctx;
1166 struct aead_request *req = container_of(areq, struct aead_request,
1167 base);
1168 struct caam_request *req_ctx = to_caam_req(areq);
1169 struct aead_edesc *edesc = req_ctx->edesc;
1170 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1171 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1172 int ecode = 0;
1174 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1176 if (unlikely(status)) {
1177 caam_qi2_strstatus(ctx->dev, status);
1178 /*
1179 * verify that the hw auth (ICV) check passed, else return -EBADMSG
1180 */
1181 if ((status & JRSTA_CCBERR_ERRID_MASK) ==
1182 JRSTA_CCBERR_ERRID_ICVCHK)
1183 ecode = -EBADMSG;
1184 else
1185 ecode = -EIO;
1188 aead_unmap(ctx->dev, edesc, req);
1189 qi_cache_free(edesc);
1190 aead_request_complete(req, ecode);
1193 static int aead_encrypt(struct aead_request *req)
1195 struct aead_edesc *edesc;
1196 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1197 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1198 struct caam_request *caam_req = aead_request_ctx(req);
1199 int ret;
1201 /* allocate extended descriptor */
1202 edesc = aead_edesc_alloc(req, true);
1203 if (IS_ERR(edesc))
1204 return PTR_ERR(edesc);
1206 caam_req->flc = &ctx->flc[ENCRYPT];
1207 caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1208 caam_req->cbk = aead_encrypt_done;
1209 caam_req->ctx = &req->base;
1210 caam_req->edesc = edesc;
1211 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1212 if (ret != -EINPROGRESS &&
1213 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1214 aead_unmap(ctx->dev, edesc, req);
1215 qi_cache_free(edesc);
1218 return ret;
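/*
 * Note on the error handling above: -EINPROGRESS means the request was
 * handed to the hardware and the completion callback will unmap and free
 * the edesc later. -EBUSY on a CRYPTO_TFM_REQ_MAY_BACKLOG request is
 * treated the same way, following the usual crypto API backlog convention
 * (an assumption about dpaa2_caam_enqueue() behaviour, which is defined
 * later in this file). Any other return value means the request was never
 * submitted, so the edesc is unmapped and freed synchronously here; the
 * same pattern repeats in aead_decrypt() and the skcipher paths below.
 */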
1221 static int aead_decrypt(struct aead_request *req)
1223 struct aead_edesc *edesc;
1224 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1225 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1226 struct caam_request *caam_req = aead_request_ctx(req);
1227 int ret;
1229 /* allocate extended descriptor */
1230 edesc = aead_edesc_alloc(req, false);
1231 if (IS_ERR(edesc))
1232 return PTR_ERR(edesc);
1234 caam_req->flc = &ctx->flc[DECRYPT];
1235 caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1236 caam_req->cbk = aead_decrypt_done;
1237 caam_req->ctx = &req->base;
1238 caam_req->edesc = edesc;
1239 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1240 if (ret != -EINPROGRESS &&
1241 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1242 aead_unmap(ctx->dev, edesc, req);
1243 qi_cache_free(edesc);
1246 return ret;
1249 static int ipsec_gcm_encrypt(struct aead_request *req)
1251 if (req->assoclen < 8)
1252 return -EINVAL;
1254 return aead_encrypt(req);
1257 static int ipsec_gcm_decrypt(struct aead_request *req)
1259 if (req->assoclen < 8)
1260 return -EINVAL;
1262 return aead_decrypt(req);
1265 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1267 struct crypto_async_request *areq = cbk_ctx;
1268 struct skcipher_request *req = skcipher_request_cast(areq);
1269 struct caam_request *req_ctx = to_caam_req(areq);
1270 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1271 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1272 struct skcipher_edesc *edesc = req_ctx->edesc;
1273 int ecode = 0;
1274 int ivsize = crypto_skcipher_ivsize(skcipher);
1276 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1278 if (unlikely(status)) {
1279 caam_qi2_strstatus(ctx->dev, status);
1280 ecode = -EIO;
1283 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1284 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1285 edesc->src_nents > 1 ? 100 : ivsize, 1);
1286 caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
1287 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1288 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1290 skcipher_unmap(ctx->dev, edesc, req);
1293 * The crypto API expects us to set the IV (req->iv) to the last
1294 * ciphertext block. This is used e.g. by the CTS mode.
1296 scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
1297 ivsize, 0);
1299 qi_cache_free(edesc);
1300 skcipher_request_complete(req, ecode);
1303 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1305 struct crypto_async_request *areq = cbk_ctx;
1306 struct skcipher_request *req = skcipher_request_cast(areq);
1307 struct caam_request *req_ctx = to_caam_req(areq);
1308 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1309 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1310 struct skcipher_edesc *edesc = req_ctx->edesc;
1311 int ecode = 0;
1312 int ivsize = crypto_skcipher_ivsize(skcipher);
1314 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1316 if (unlikely(status)) {
1317 caam_qi2_strstatus(ctx->dev, status);
1318 ecode = -EIO;
1321 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1322 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1323 edesc->src_nents > 1 ? 100 : ivsize, 1);
1324 caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
1325 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1326 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1328 skcipher_unmap(ctx->dev, edesc, req);
1329 qi_cache_free(edesc);
1330 skcipher_request_complete(req, ecode);
1333 static int skcipher_encrypt(struct skcipher_request *req)
1335 struct skcipher_edesc *edesc;
1336 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1337 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1338 struct caam_request *caam_req = skcipher_request_ctx(req);
1339 int ret;
1341 /* allocate extended descriptor */
1342 edesc = skcipher_edesc_alloc(req);
1343 if (IS_ERR(edesc))
1344 return PTR_ERR(edesc);
1346 caam_req->flc = &ctx->flc[ENCRYPT];
1347 caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1348 caam_req->cbk = skcipher_encrypt_done;
1349 caam_req->ctx = &req->base;
1350 caam_req->edesc = edesc;
1351 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1352 if (ret != -EINPROGRESS &&
1353 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1354 skcipher_unmap(ctx->dev, edesc, req);
1355 qi_cache_free(edesc);
1358 return ret;
1361 static int skcipher_decrypt(struct skcipher_request *req)
1363 struct skcipher_edesc *edesc;
1364 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1365 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1366 struct caam_request *caam_req = skcipher_request_ctx(req);
1367 int ivsize = crypto_skcipher_ivsize(skcipher);
1368 int ret;
1370 /* allocate extended descriptor */
1371 edesc = skcipher_edesc_alloc(req);
1372 if (IS_ERR(edesc))
1373 return PTR_ERR(edesc);
1376 * The crypto API expects us to set the IV (req->iv) to the last
1377 * ciphertext block.
1379 scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
1380 ivsize, 0);
1382 caam_req->flc = &ctx->flc[DECRYPT];
1383 caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1384 caam_req->cbk = skcipher_decrypt_done;
1385 caam_req->ctx = &req->base;
1386 caam_req->edesc = edesc;
1387 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1388 if (ret != -EINPROGRESS &&
1389 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1390 skcipher_unmap(ctx->dev, edesc, req);
1391 qi_cache_free(edesc);
1394 return ret;
1397 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1398 bool uses_dkp)
1400 dma_addr_t dma_addr;
1401 int i;
1403 /* copy descriptor header template value */
1404 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1405 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1407 ctx->dev = caam->dev;
1408 ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1410 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1411 offsetof(struct caam_ctx, flc_dma),
1412 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1413 if (dma_mapping_error(ctx->dev, dma_addr)) {
1414 dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1415 return -ENOMEM;
1418 for (i = 0; i < NUM_OP; i++)
1419 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1420 ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1422 return 0;
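/*
 * Note: the single dma_map_single_attrs() call above deliberately covers
 * both the flc[] array and the key[] buffer: key[] sits between flc[] and
 * flc_dma[] in struct caam_ctx, so a mapping of length
 * offsetof(struct caam_ctx, flc_dma) spans both. flc_dma[i] and key_dma
 * are then simply offsets into that one mapping.
 */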
1425 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1427 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1428 struct caam_skcipher_alg *caam_alg =
1429 container_of(alg, typeof(*caam_alg), skcipher);
1431 crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1432 return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
1435 static int caam_cra_init_aead(struct crypto_aead *tfm)
1437 struct aead_alg *alg = crypto_aead_alg(tfm);
1438 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1439 aead);
1441 crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1442 return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1443 alg->setkey == aead_setkey);
1446 static void caam_exit_common(struct caam_ctx *ctx)
1448 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1449 offsetof(struct caam_ctx, flc_dma), ctx->dir,
1450 DMA_ATTR_SKIP_CPU_SYNC);
1453 static void caam_cra_exit(struct crypto_skcipher *tfm)
1455 caam_exit_common(crypto_skcipher_ctx(tfm));
1458 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1460 caam_exit_common(crypto_aead_ctx(tfm));
1463 static struct caam_skcipher_alg driver_algs[] = {
1465 .skcipher = {
1466 .base = {
1467 .cra_name = "cbc(aes)",
1468 .cra_driver_name = "cbc-aes-caam-qi2",
1469 .cra_blocksize = AES_BLOCK_SIZE,
1471 .setkey = skcipher_setkey,
1472 .encrypt = skcipher_encrypt,
1473 .decrypt = skcipher_decrypt,
1474 .min_keysize = AES_MIN_KEY_SIZE,
1475 .max_keysize = AES_MAX_KEY_SIZE,
1476 .ivsize = AES_BLOCK_SIZE,
1478 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1481 .skcipher = {
1482 .base = {
1483 .cra_name = "cbc(des3_ede)",
1484 .cra_driver_name = "cbc-3des-caam-qi2",
1485 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1487 .setkey = skcipher_setkey,
1488 .encrypt = skcipher_encrypt,
1489 .decrypt = skcipher_decrypt,
1490 .min_keysize = DES3_EDE_KEY_SIZE,
1491 .max_keysize = DES3_EDE_KEY_SIZE,
1492 .ivsize = DES3_EDE_BLOCK_SIZE,
1494 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1497 .skcipher = {
1498 .base = {
1499 .cra_name = "cbc(des)",
1500 .cra_driver_name = "cbc-des-caam-qi2",
1501 .cra_blocksize = DES_BLOCK_SIZE,
1503 .setkey = skcipher_setkey,
1504 .encrypt = skcipher_encrypt,
1505 .decrypt = skcipher_decrypt,
1506 .min_keysize = DES_KEY_SIZE,
1507 .max_keysize = DES_KEY_SIZE,
1508 .ivsize = DES_BLOCK_SIZE,
1510 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1513 .skcipher = {
1514 .base = {
1515 .cra_name = "ctr(aes)",
1516 .cra_driver_name = "ctr-aes-caam-qi2",
1517 .cra_blocksize = 1,
1519 .setkey = skcipher_setkey,
1520 .encrypt = skcipher_encrypt,
1521 .decrypt = skcipher_decrypt,
1522 .min_keysize = AES_MIN_KEY_SIZE,
1523 .max_keysize = AES_MAX_KEY_SIZE,
1524 .ivsize = AES_BLOCK_SIZE,
1525 .chunksize = AES_BLOCK_SIZE,
1527 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1528 OP_ALG_AAI_CTR_MOD128,
1531 .skcipher = {
1532 .base = {
1533 .cra_name = "rfc3686(ctr(aes))",
1534 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1535 .cra_blocksize = 1,
1537 .setkey = skcipher_setkey,
1538 .encrypt = skcipher_encrypt,
1539 .decrypt = skcipher_decrypt,
1540 .min_keysize = AES_MIN_KEY_SIZE +
1541 CTR_RFC3686_NONCE_SIZE,
1542 .max_keysize = AES_MAX_KEY_SIZE +
1543 CTR_RFC3686_NONCE_SIZE,
1544 .ivsize = CTR_RFC3686_IV_SIZE,
1545 .chunksize = AES_BLOCK_SIZE,
1547 .caam = {
1548 .class1_alg_type = OP_ALG_ALGSEL_AES |
1549 OP_ALG_AAI_CTR_MOD128,
1550 .rfc3686 = true,
1554 .skcipher = {
1555 .base = {
1556 .cra_name = "xts(aes)",
1557 .cra_driver_name = "xts-aes-caam-qi2",
1558 .cra_blocksize = AES_BLOCK_SIZE,
1560 .setkey = xts_skcipher_setkey,
1561 .encrypt = skcipher_encrypt,
1562 .decrypt = skcipher_decrypt,
1563 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1564 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1565 .ivsize = AES_BLOCK_SIZE,
1567 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1570 .skcipher = {
1571 .base = {
1572 .cra_name = "chacha20",
1573 .cra_driver_name = "chacha20-caam-qi2",
1574 .cra_blocksize = 1,
1576 .setkey = skcipher_setkey,
1577 .encrypt = skcipher_encrypt,
1578 .decrypt = skcipher_decrypt,
1579 .min_keysize = CHACHA_KEY_SIZE,
1580 .max_keysize = CHACHA_KEY_SIZE,
1581 .ivsize = CHACHA_IV_SIZE,
1583 .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
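/*
 * Each entry above (and in driver_aeads[] below) pairs a generic crypto
 * API algorithm template with the CAAM-specific class 1/class 2 descriptor
 * header bits; caam_cra_init() copies those bits into ctx->cdata.algtype
 * and ctx->adata.algtype when a transform is instantiated.
 */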
1587 static struct caam_aead_alg driver_aeads[] = {
1589 .aead = {
1590 .base = {
1591 .cra_name = "rfc4106(gcm(aes))",
1592 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1593 .cra_blocksize = 1,
1595 .setkey = rfc4106_setkey,
1596 .setauthsize = rfc4106_setauthsize,
1597 .encrypt = ipsec_gcm_encrypt,
1598 .decrypt = ipsec_gcm_decrypt,
1599 .ivsize = 8,
1600 .maxauthsize = AES_BLOCK_SIZE,
1602 .caam = {
1603 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1607 .aead = {
1608 .base = {
1609 .cra_name = "rfc4543(gcm(aes))",
1610 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1611 .cra_blocksize = 1,
1613 .setkey = rfc4543_setkey,
1614 .setauthsize = rfc4543_setauthsize,
1615 .encrypt = ipsec_gcm_encrypt,
1616 .decrypt = ipsec_gcm_decrypt,
1617 .ivsize = 8,
1618 .maxauthsize = AES_BLOCK_SIZE,
1620 .caam = {
1621 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1624 /* Galois Counter Mode */
1626 .aead = {
1627 .base = {
1628 .cra_name = "gcm(aes)",
1629 .cra_driver_name = "gcm-aes-caam-qi2",
1630 .cra_blocksize = 1,
1632 .setkey = gcm_setkey,
1633 .setauthsize = gcm_setauthsize,
1634 .encrypt = aead_encrypt,
1635 .decrypt = aead_decrypt,
1636 .ivsize = 12,
1637 .maxauthsize = AES_BLOCK_SIZE,
1639 .caam = {
1640 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1643 /* single-pass ipsec_esp descriptor */
1645 .aead = {
1646 .base = {
1647 .cra_name = "authenc(hmac(md5),cbc(aes))",
1648 .cra_driver_name = "authenc-hmac-md5-"
1649 "cbc-aes-caam-qi2",
1650 .cra_blocksize = AES_BLOCK_SIZE,
1652 .setkey = aead_setkey,
1653 .setauthsize = aead_setauthsize,
1654 .encrypt = aead_encrypt,
1655 .decrypt = aead_decrypt,
1656 .ivsize = AES_BLOCK_SIZE,
1657 .maxauthsize = MD5_DIGEST_SIZE,
1659 .caam = {
1660 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1661 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1662 OP_ALG_AAI_HMAC_PRECOMP,
1666 .aead = {
1667 .base = {
1668 .cra_name = "echainiv(authenc(hmac(md5),"
1669 "cbc(aes)))",
1670 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1671 "cbc-aes-caam-qi2",
1672 .cra_blocksize = AES_BLOCK_SIZE,
1674 .setkey = aead_setkey,
1675 .setauthsize = aead_setauthsize,
1676 .encrypt = aead_encrypt,
1677 .decrypt = aead_decrypt,
1678 .ivsize = AES_BLOCK_SIZE,
1679 .maxauthsize = MD5_DIGEST_SIZE,
1681 .caam = {
1682 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1683 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1684 OP_ALG_AAI_HMAC_PRECOMP,
1685 .geniv = true,
1689 .aead = {
1690 .base = {
1691 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1692 .cra_driver_name = "authenc-hmac-sha1-"
1693 "cbc-aes-caam-qi2",
1694 .cra_blocksize = AES_BLOCK_SIZE,
1696 .setkey = aead_setkey,
1697 .setauthsize = aead_setauthsize,
1698 .encrypt = aead_encrypt,
1699 .decrypt = aead_decrypt,
1700 .ivsize = AES_BLOCK_SIZE,
1701 .maxauthsize = SHA1_DIGEST_SIZE,
1703 .caam = {
1704 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1705 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1706 OP_ALG_AAI_HMAC_PRECOMP,
1710 .aead = {
1711 .base = {
1712 .cra_name = "echainiv(authenc(hmac(sha1),"
1713 "cbc(aes)))",
1714 .cra_driver_name = "echainiv-authenc-"
1715 "hmac-sha1-cbc-aes-caam-qi2",
1716 .cra_blocksize = AES_BLOCK_SIZE,
1718 .setkey = aead_setkey,
1719 .setauthsize = aead_setauthsize,
1720 .encrypt = aead_encrypt,
1721 .decrypt = aead_decrypt,
1722 .ivsize = AES_BLOCK_SIZE,
1723 .maxauthsize = SHA1_DIGEST_SIZE,
1725 .caam = {
1726 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1727 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1728 OP_ALG_AAI_HMAC_PRECOMP,
1729 .geniv = true,
1733 .aead = {
1734 .base = {
1735 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1736 .cra_driver_name = "authenc-hmac-sha224-"
1737 "cbc-aes-caam-qi2",
1738 .cra_blocksize = AES_BLOCK_SIZE,
1740 .setkey = aead_setkey,
1741 .setauthsize = aead_setauthsize,
1742 .encrypt = aead_encrypt,
1743 .decrypt = aead_decrypt,
1744 .ivsize = AES_BLOCK_SIZE,
1745 .maxauthsize = SHA224_DIGEST_SIZE,
1747 .caam = {
1748 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1749 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1750 OP_ALG_AAI_HMAC_PRECOMP,
1754 .aead = {
1755 .base = {
1756 .cra_name = "echainiv(authenc(hmac(sha224),"
1757 "cbc(aes)))",
1758 .cra_driver_name = "echainiv-authenc-"
1759 "hmac-sha224-cbc-aes-caam-qi2",
1760 .cra_blocksize = AES_BLOCK_SIZE,
1762 .setkey = aead_setkey,
1763 .setauthsize = aead_setauthsize,
1764 .encrypt = aead_encrypt,
1765 .decrypt = aead_decrypt,
1766 .ivsize = AES_BLOCK_SIZE,
1767 .maxauthsize = SHA224_DIGEST_SIZE,
1769 .caam = {
1770 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1771 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1772 OP_ALG_AAI_HMAC_PRECOMP,
1773 .geniv = true,
1777 .aead = {
1778 .base = {
1779 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1780 .cra_driver_name = "authenc-hmac-sha256-"
1781 "cbc-aes-caam-qi2",
1782 .cra_blocksize = AES_BLOCK_SIZE,
1784 .setkey = aead_setkey,
1785 .setauthsize = aead_setauthsize,
1786 .encrypt = aead_encrypt,
1787 .decrypt = aead_decrypt,
1788 .ivsize = AES_BLOCK_SIZE,
1789 .maxauthsize = SHA256_DIGEST_SIZE,
1791 .caam = {
1792 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1793 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1794 OP_ALG_AAI_HMAC_PRECOMP,
1798 .aead = {
1799 .base = {
1800 .cra_name = "echainiv(authenc(hmac(sha256),"
1801 "cbc(aes)))",
1802 .cra_driver_name = "echainiv-authenc-"
1803 "hmac-sha256-cbc-aes-"
1804 "caam-qi2",
1805 .cra_blocksize = AES_BLOCK_SIZE,
1807 .setkey = aead_setkey,
1808 .setauthsize = aead_setauthsize,
1809 .encrypt = aead_encrypt,
1810 .decrypt = aead_decrypt,
1811 .ivsize = AES_BLOCK_SIZE,
1812 .maxauthsize = SHA256_DIGEST_SIZE,
1814 .caam = {
1815 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1816 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1817 OP_ALG_AAI_HMAC_PRECOMP,
1818 .geniv = true,
1822 .aead = {
1823 .base = {
1824 .cra_name = "authenc(hmac(sha384),cbc(aes))",
1825 .cra_driver_name = "authenc-hmac-sha384-"
1826 "cbc-aes-caam-qi2",
1827 .cra_blocksize = AES_BLOCK_SIZE,
1829 .setkey = aead_setkey,
1830 .setauthsize = aead_setauthsize,
1831 .encrypt = aead_encrypt,
1832 .decrypt = aead_decrypt,
1833 .ivsize = AES_BLOCK_SIZE,
1834 .maxauthsize = SHA384_DIGEST_SIZE,
1836 .caam = {
1837 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1838 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1839 OP_ALG_AAI_HMAC_PRECOMP,
1843 .aead = {
1844 .base = {
1845 .cra_name = "echainiv(authenc(hmac(sha384),"
1846 "cbc(aes)))",
1847 .cra_driver_name = "echainiv-authenc-"
1848 "hmac-sha384-cbc-aes-"
1849 "caam-qi2",
1850 .cra_blocksize = AES_BLOCK_SIZE,
1852 .setkey = aead_setkey,
1853 .setauthsize = aead_setauthsize,
1854 .encrypt = aead_encrypt,
1855 .decrypt = aead_decrypt,
1856 .ivsize = AES_BLOCK_SIZE,
1857 .maxauthsize = SHA384_DIGEST_SIZE,
1859 .caam = {
1860 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1861 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1862 OP_ALG_AAI_HMAC_PRECOMP,
1863 .geniv = true,
1867 .aead = {
1868 .base = {
1869 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1870 .cra_driver_name = "authenc-hmac-sha512-"
1871 "cbc-aes-caam-qi2",
1872 .cra_blocksize = AES_BLOCK_SIZE,
1874 .setkey = aead_setkey,
1875 .setauthsize = aead_setauthsize,
1876 .encrypt = aead_encrypt,
1877 .decrypt = aead_decrypt,
1878 .ivsize = AES_BLOCK_SIZE,
1879 .maxauthsize = SHA512_DIGEST_SIZE,
1881 .caam = {
1882 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1883 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1884 OP_ALG_AAI_HMAC_PRECOMP,
1888 .aead = {
1889 .base = {
1890 .cra_name = "echainiv(authenc(hmac(sha512),"
1891 "cbc(aes)))",
1892 .cra_driver_name = "echainiv-authenc-"
1893 "hmac-sha512-cbc-aes-"
1894 "caam-qi2",
1895 .cra_blocksize = AES_BLOCK_SIZE,
1897 .setkey = aead_setkey,
1898 .setauthsize = aead_setauthsize,
1899 .encrypt = aead_encrypt,
1900 .decrypt = aead_decrypt,
1901 .ivsize = AES_BLOCK_SIZE,
1902 .maxauthsize = SHA512_DIGEST_SIZE,
1904 .caam = {
1905 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1906 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1907 OP_ALG_AAI_HMAC_PRECOMP,
1908 .geniv = true,
1912 .aead = {
1913 .base = {
1914 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1915 .cra_driver_name = "authenc-hmac-md5-"
1916 "cbc-des3_ede-caam-qi2",
1917 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1919 .setkey = aead_setkey,
1920 .setauthsize = aead_setauthsize,
1921 .encrypt = aead_encrypt,
1922 .decrypt = aead_decrypt,
1923 .ivsize = DES3_EDE_BLOCK_SIZE,
1924 .maxauthsize = MD5_DIGEST_SIZE,
1926 .caam = {
1927 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1928 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1929 OP_ALG_AAI_HMAC_PRECOMP,
1933 .aead = {
1934 .base = {
1935 .cra_name = "echainiv(authenc(hmac(md5),"
1936 "cbc(des3_ede)))",
1937 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1938 "cbc-des3_ede-caam-qi2",
1939 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1941 .setkey = aead_setkey,
1942 .setauthsize = aead_setauthsize,
1943 .encrypt = aead_encrypt,
1944 .decrypt = aead_decrypt,
1945 .ivsize = DES3_EDE_BLOCK_SIZE,
1946 .maxauthsize = MD5_DIGEST_SIZE,
1948 .caam = {
1949 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1950 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1951 OP_ALG_AAI_HMAC_PRECOMP,
1952 .geniv = true,
1956 .aead = {
1957 .base = {
1958 .cra_name = "authenc(hmac(sha1),"
1959 "cbc(des3_ede))",
1960 .cra_driver_name = "authenc-hmac-sha1-"
1961 "cbc-des3_ede-caam-qi2",
1962 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1964 .setkey = aead_setkey,
1965 .setauthsize = aead_setauthsize,
1966 .encrypt = aead_encrypt,
1967 .decrypt = aead_decrypt,
1968 .ivsize = DES3_EDE_BLOCK_SIZE,
1969 .maxauthsize = SHA1_DIGEST_SIZE,
1971 .caam = {
1972 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1973 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1974 OP_ALG_AAI_HMAC_PRECOMP,
1978 .aead = {
1979 .base = {
1980 .cra_name = "echainiv(authenc(hmac(sha1),"
1981 "cbc(des3_ede)))",
1982 .cra_driver_name = "echainiv-authenc-"
1983 "hmac-sha1-"
1984 "cbc-des3_ede-caam-qi2",
1985 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1987 .setkey = aead_setkey,
1988 .setauthsize = aead_setauthsize,
1989 .encrypt = aead_encrypt,
1990 .decrypt = aead_decrypt,
1991 .ivsize = DES3_EDE_BLOCK_SIZE,
1992 .maxauthsize = SHA1_DIGEST_SIZE,
1994 .caam = {
1995 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1996 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1997 OP_ALG_AAI_HMAC_PRECOMP,
1998 .geniv = true,
2002 .aead = {
2003 .base = {
2004 .cra_name = "authenc(hmac(sha224),"
2005 "cbc(des3_ede))",
2006 .cra_driver_name = "authenc-hmac-sha224-"
2007 "cbc-des3_ede-caam-qi2",
2008 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2010 .setkey = aead_setkey,
2011 .setauthsize = aead_setauthsize,
2012 .encrypt = aead_encrypt,
2013 .decrypt = aead_decrypt,
2014 .ivsize = DES3_EDE_BLOCK_SIZE,
2015 .maxauthsize = SHA224_DIGEST_SIZE,
2017 .caam = {
2018 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2019 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2020 OP_ALG_AAI_HMAC_PRECOMP,
2024 .aead = {
2025 .base = {
2026 .cra_name = "echainiv(authenc(hmac(sha224),"
2027 "cbc(des3_ede)))",
2028 .cra_driver_name = "echainiv-authenc-"
2029 "hmac-sha224-"
2030 "cbc-des3_ede-caam-qi2",
2031 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2033 .setkey = aead_setkey,
2034 .setauthsize = aead_setauthsize,
2035 .encrypt = aead_encrypt,
2036 .decrypt = aead_decrypt,
2037 .ivsize = DES3_EDE_BLOCK_SIZE,
2038 .maxauthsize = SHA224_DIGEST_SIZE,
2040 .caam = {
2041 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2042 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2043 OP_ALG_AAI_HMAC_PRECOMP,
2044 .geniv = true,
2048 .aead = {
2049 .base = {
2050 .cra_name = "authenc(hmac(sha256),"
2051 "cbc(des3_ede))",
2052 .cra_driver_name = "authenc-hmac-sha256-"
2053 "cbc-des3_ede-caam-qi2",
2054 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2056 .setkey = aead_setkey,
2057 .setauthsize = aead_setauthsize,
2058 .encrypt = aead_encrypt,
2059 .decrypt = aead_decrypt,
2060 .ivsize = DES3_EDE_BLOCK_SIZE,
2061 .maxauthsize = SHA256_DIGEST_SIZE,
2063 .caam = {
2064 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2065 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2066 OP_ALG_AAI_HMAC_PRECOMP,
2070 .aead = {
2071 .base = {
2072 .cra_name = "echainiv(authenc(hmac(sha256),"
2073 "cbc(des3_ede)))",
2074 .cra_driver_name = "echainiv-authenc-"
2075 "hmac-sha256-"
2076 "cbc-des3_ede-caam-qi2",
2077 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2079 .setkey = aead_setkey,
2080 .setauthsize = aead_setauthsize,
2081 .encrypt = aead_encrypt,
2082 .decrypt = aead_decrypt,
2083 .ivsize = DES3_EDE_BLOCK_SIZE,
2084 .maxauthsize = SHA256_DIGEST_SIZE,
2086 .caam = {
2087 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2088 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2089 OP_ALG_AAI_HMAC_PRECOMP,
2090 .geniv = true,
2094 .aead = {
2095 .base = {
2096 .cra_name = "authenc(hmac(sha384),"
2097 "cbc(des3_ede))",
2098 .cra_driver_name = "authenc-hmac-sha384-"
2099 "cbc-des3_ede-caam-qi2",
2100 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2102 .setkey = aead_setkey,
2103 .setauthsize = aead_setauthsize,
2104 .encrypt = aead_encrypt,
2105 .decrypt = aead_decrypt,
2106 .ivsize = DES3_EDE_BLOCK_SIZE,
2107 .maxauthsize = SHA384_DIGEST_SIZE,
2109 .caam = {
2110 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2111 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2112 OP_ALG_AAI_HMAC_PRECOMP,
2116 .aead = {
2117 .base = {
2118 .cra_name = "echainiv(authenc(hmac(sha384),"
2119 "cbc(des3_ede)))",
2120 .cra_driver_name = "echainiv-authenc-"
2121 "hmac-sha384-"
2122 "cbc-des3_ede-caam-qi2",
2123 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2125 .setkey = aead_setkey,
2126 .setauthsize = aead_setauthsize,
2127 .encrypt = aead_encrypt,
2128 .decrypt = aead_decrypt,
2129 .ivsize = DES3_EDE_BLOCK_SIZE,
2130 .maxauthsize = SHA384_DIGEST_SIZE,
2132 .caam = {
2133 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2134 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2135 OP_ALG_AAI_HMAC_PRECOMP,
2136 .geniv = true,
2140 .aead = {
2141 .base = {
2142 .cra_name = "authenc(hmac(sha512),"
2143 "cbc(des3_ede))",
2144 .cra_driver_name = "authenc-hmac-sha512-"
2145 "cbc-des3_ede-caam-qi2",
2146 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2148 .setkey = aead_setkey,
2149 .setauthsize = aead_setauthsize,
2150 .encrypt = aead_encrypt,
2151 .decrypt = aead_decrypt,
2152 .ivsize = DES3_EDE_BLOCK_SIZE,
2153 .maxauthsize = SHA512_DIGEST_SIZE,
2155 .caam = {
2156 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2157 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2158 OP_ALG_AAI_HMAC_PRECOMP,
2162 .aead = {
2163 .base = {
2164 .cra_name = "echainiv(authenc(hmac(sha512),"
2165 "cbc(des3_ede)))",
2166 .cra_driver_name = "echainiv-authenc-"
2167 "hmac-sha512-"
2168 "cbc-des3_ede-caam-qi2",
2169 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2171 .setkey = aead_setkey,
2172 .setauthsize = aead_setauthsize,
2173 .encrypt = aead_encrypt,
2174 .decrypt = aead_decrypt,
2175 .ivsize = DES3_EDE_BLOCK_SIZE,
2176 .maxauthsize = SHA512_DIGEST_SIZE,
2178 .caam = {
2179 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2180 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2181 OP_ALG_AAI_HMAC_PRECOMP,
2182 .geniv = true,
2186 .aead = {
2187 .base = {
2188 .cra_name = "authenc(hmac(md5),cbc(des))",
2189 .cra_driver_name = "authenc-hmac-md5-"
2190 "cbc-des-caam-qi2",
2191 .cra_blocksize = DES_BLOCK_SIZE,
2193 .setkey = aead_setkey,
2194 .setauthsize = aead_setauthsize,
2195 .encrypt = aead_encrypt,
2196 .decrypt = aead_decrypt,
2197 .ivsize = DES_BLOCK_SIZE,
2198 .maxauthsize = MD5_DIGEST_SIZE,
2200 .caam = {
2201 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2202 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2203 OP_ALG_AAI_HMAC_PRECOMP,
2207 .aead = {
2208 .base = {
2209 .cra_name = "echainiv(authenc(hmac(md5),"
2210 "cbc(des)))",
2211 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2212 "cbc-des-caam-qi2",
2213 .cra_blocksize = DES_BLOCK_SIZE,
2215 .setkey = aead_setkey,
2216 .setauthsize = aead_setauthsize,
2217 .encrypt = aead_encrypt,
2218 .decrypt = aead_decrypt,
2219 .ivsize = DES_BLOCK_SIZE,
2220 .maxauthsize = MD5_DIGEST_SIZE,
2222 .caam = {
2223 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2224 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2225 OP_ALG_AAI_HMAC_PRECOMP,
2226 .geniv = true,
2230 .aead = {
2231 .base = {
2232 .cra_name = "authenc(hmac(sha1),cbc(des))",
2233 .cra_driver_name = "authenc-hmac-sha1-"
2234 "cbc-des-caam-qi2",
2235 .cra_blocksize = DES_BLOCK_SIZE,
2237 .setkey = aead_setkey,
2238 .setauthsize = aead_setauthsize,
2239 .encrypt = aead_encrypt,
2240 .decrypt = aead_decrypt,
2241 .ivsize = DES_BLOCK_SIZE,
2242 .maxauthsize = SHA1_DIGEST_SIZE,
2244 .caam = {
2245 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2246 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2247 OP_ALG_AAI_HMAC_PRECOMP,
2251 .aead = {
2252 .base = {
2253 .cra_name = "echainiv(authenc(hmac(sha1),"
2254 "cbc(des)))",
2255 .cra_driver_name = "echainiv-authenc-"
2256 "hmac-sha1-cbc-des-caam-qi2",
2257 .cra_blocksize = DES_BLOCK_SIZE,
2259 .setkey = aead_setkey,
2260 .setauthsize = aead_setauthsize,
2261 .encrypt = aead_encrypt,
2262 .decrypt = aead_decrypt,
2263 .ivsize = DES_BLOCK_SIZE,
2264 .maxauthsize = SHA1_DIGEST_SIZE,
2266 .caam = {
2267 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2268 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2269 OP_ALG_AAI_HMAC_PRECOMP,
2270 .geniv = true,
2274 .aead = {
2275 .base = {
2276 .cra_name = "authenc(hmac(sha224),cbc(des))",
2277 .cra_driver_name = "authenc-hmac-sha224-"
2278 "cbc-des-caam-qi2",
2279 .cra_blocksize = DES_BLOCK_SIZE,
2281 .setkey = aead_setkey,
2282 .setauthsize = aead_setauthsize,
2283 .encrypt = aead_encrypt,
2284 .decrypt = aead_decrypt,
2285 .ivsize = DES_BLOCK_SIZE,
2286 .maxauthsize = SHA224_DIGEST_SIZE,
2288 .caam = {
2289 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2290 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2291 OP_ALG_AAI_HMAC_PRECOMP,
2295 .aead = {
2296 .base = {
2297 .cra_name = "echainiv(authenc(hmac(sha224),"
2298 "cbc(des)))",
2299 .cra_driver_name = "echainiv-authenc-"
2300 "hmac-sha224-cbc-des-"
2301 "caam-qi2",
2302 .cra_blocksize = DES_BLOCK_SIZE,
2304 .setkey = aead_setkey,
2305 .setauthsize = aead_setauthsize,
2306 .encrypt = aead_encrypt,
2307 .decrypt = aead_decrypt,
2308 .ivsize = DES_BLOCK_SIZE,
2309 .maxauthsize = SHA224_DIGEST_SIZE,
2311 .caam = {
2312 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2313 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2314 OP_ALG_AAI_HMAC_PRECOMP,
2315 .geniv = true,
2319 .aead = {
2320 .base = {
2321 .cra_name = "authenc(hmac(sha256),cbc(des))",
2322 .cra_driver_name = "authenc-hmac-sha256-"
2323 "cbc-des-caam-qi2",
2324 .cra_blocksize = DES_BLOCK_SIZE,
2326 .setkey = aead_setkey,
2327 .setauthsize = aead_setauthsize,
2328 .encrypt = aead_encrypt,
2329 .decrypt = aead_decrypt,
2330 .ivsize = DES_BLOCK_SIZE,
2331 .maxauthsize = SHA256_DIGEST_SIZE,
2333 .caam = {
2334 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2335 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2336 OP_ALG_AAI_HMAC_PRECOMP,
2340 .aead = {
2341 .base = {
2342 .cra_name = "echainiv(authenc(hmac(sha256),"
2343 "cbc(des)))",
2344 .cra_driver_name = "echainiv-authenc-"
2345 "hmac-sha256-cbc-desi-"
2346 "caam-qi2",
2347 .cra_blocksize = DES_BLOCK_SIZE,
2349 .setkey = aead_setkey,
2350 .setauthsize = aead_setauthsize,
2351 .encrypt = aead_encrypt,
2352 .decrypt = aead_decrypt,
2353 .ivsize = DES_BLOCK_SIZE,
2354 .maxauthsize = SHA256_DIGEST_SIZE,
2356 .caam = {
2357 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2358 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2359 OP_ALG_AAI_HMAC_PRECOMP,
2360 .geniv = true,
2364 .aead = {
2365 .base = {
2366 .cra_name = "authenc(hmac(sha384),cbc(des))",
2367 .cra_driver_name = "authenc-hmac-sha384-"
2368 "cbc-des-caam-qi2",
2369 .cra_blocksize = DES_BLOCK_SIZE,
2371 .setkey = aead_setkey,
2372 .setauthsize = aead_setauthsize,
2373 .encrypt = aead_encrypt,
2374 .decrypt = aead_decrypt,
2375 .ivsize = DES_BLOCK_SIZE,
2376 .maxauthsize = SHA384_DIGEST_SIZE,
2378 .caam = {
2379 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2380 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2381 OP_ALG_AAI_HMAC_PRECOMP,
2385 .aead = {
2386 .base = {
2387 .cra_name = "echainiv(authenc(hmac(sha384),"
2388 "cbc(des)))",
2389 .cra_driver_name = "echainiv-authenc-"
2390 "hmac-sha384-cbc-des-"
2391 "caam-qi2",
2392 .cra_blocksize = DES_BLOCK_SIZE,
2394 .setkey = aead_setkey,
2395 .setauthsize = aead_setauthsize,
2396 .encrypt = aead_encrypt,
2397 .decrypt = aead_decrypt,
2398 .ivsize = DES_BLOCK_SIZE,
2399 .maxauthsize = SHA384_DIGEST_SIZE,
2401 .caam = {
2402 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2403 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2404 OP_ALG_AAI_HMAC_PRECOMP,
2405 .geniv = true,
2409 .aead = {
2410 .base = {
2411 .cra_name = "authenc(hmac(sha512),cbc(des))",
2412 .cra_driver_name = "authenc-hmac-sha512-"
2413 "cbc-des-caam-qi2",
2414 .cra_blocksize = DES_BLOCK_SIZE,
2416 .setkey = aead_setkey,
2417 .setauthsize = aead_setauthsize,
2418 .encrypt = aead_encrypt,
2419 .decrypt = aead_decrypt,
2420 .ivsize = DES_BLOCK_SIZE,
2421 .maxauthsize = SHA512_DIGEST_SIZE,
2423 .caam = {
2424 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2425 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2426 OP_ALG_AAI_HMAC_PRECOMP,
2430 .aead = {
2431 .base = {
2432 .cra_name = "echainiv(authenc(hmac(sha512),"
2433 "cbc(des)))",
2434 .cra_driver_name = "echainiv-authenc-"
2435 "hmac-sha512-cbc-des-"
2436 "caam-qi2",
2437 .cra_blocksize = DES_BLOCK_SIZE,
2439 .setkey = aead_setkey,
2440 .setauthsize = aead_setauthsize,
2441 .encrypt = aead_encrypt,
2442 .decrypt = aead_decrypt,
2443 .ivsize = DES_BLOCK_SIZE,
2444 .maxauthsize = SHA512_DIGEST_SIZE,
2446 .caam = {
2447 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2448 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2449 OP_ALG_AAI_HMAC_PRECOMP,
2450 .geniv = true,
2454 .aead = {
2455 .base = {
2456 .cra_name = "authenc(hmac(md5),"
2457 "rfc3686(ctr(aes)))",
2458 .cra_driver_name = "authenc-hmac-md5-"
2459 "rfc3686-ctr-aes-caam-qi2",
2460 .cra_blocksize = 1,
2462 .setkey = aead_setkey,
2463 .setauthsize = aead_setauthsize,
2464 .encrypt = aead_encrypt,
2465 .decrypt = aead_decrypt,
2466 .ivsize = CTR_RFC3686_IV_SIZE,
2467 .maxauthsize = MD5_DIGEST_SIZE,
2469 .caam = {
2470 .class1_alg_type = OP_ALG_ALGSEL_AES |
2471 OP_ALG_AAI_CTR_MOD128,
2472 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2473 OP_ALG_AAI_HMAC_PRECOMP,
2474 .rfc3686 = true,
2478 .aead = {
2479 .base = {
2480 .cra_name = "seqiv(authenc("
2481 "hmac(md5),rfc3686(ctr(aes))))",
2482 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2483 "rfc3686-ctr-aes-caam-qi2",
2484 .cra_blocksize = 1,
2486 .setkey = aead_setkey,
2487 .setauthsize = aead_setauthsize,
2488 .encrypt = aead_encrypt,
2489 .decrypt = aead_decrypt,
2490 .ivsize = CTR_RFC3686_IV_SIZE,
2491 .maxauthsize = MD5_DIGEST_SIZE,
2493 .caam = {
2494 .class1_alg_type = OP_ALG_ALGSEL_AES |
2495 OP_ALG_AAI_CTR_MOD128,
2496 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2497 OP_ALG_AAI_HMAC_PRECOMP,
2498 .rfc3686 = true,
2499 .geniv = true,
2503 .aead = {
2504 .base = {
2505 .cra_name = "authenc(hmac(sha1),"
2506 "rfc3686(ctr(aes)))",
2507 .cra_driver_name = "authenc-hmac-sha1-"
2508 "rfc3686-ctr-aes-caam-qi2",
2509 .cra_blocksize = 1,
2511 .setkey = aead_setkey,
2512 .setauthsize = aead_setauthsize,
2513 .encrypt = aead_encrypt,
2514 .decrypt = aead_decrypt,
2515 .ivsize = CTR_RFC3686_IV_SIZE,
2516 .maxauthsize = SHA1_DIGEST_SIZE,
2518 .caam = {
2519 .class1_alg_type = OP_ALG_ALGSEL_AES |
2520 OP_ALG_AAI_CTR_MOD128,
2521 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2522 OP_ALG_AAI_HMAC_PRECOMP,
2523 .rfc3686 = true,
2527 .aead = {
2528 .base = {
2529 .cra_name = "seqiv(authenc("
2530 "hmac(sha1),rfc3686(ctr(aes))))",
2531 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2532 "rfc3686-ctr-aes-caam-qi2",
2533 .cra_blocksize = 1,
2535 .setkey = aead_setkey,
2536 .setauthsize = aead_setauthsize,
2537 .encrypt = aead_encrypt,
2538 .decrypt = aead_decrypt,
2539 .ivsize = CTR_RFC3686_IV_SIZE,
2540 .maxauthsize = SHA1_DIGEST_SIZE,
2542 .caam = {
2543 .class1_alg_type = OP_ALG_ALGSEL_AES |
2544 OP_ALG_AAI_CTR_MOD128,
2545 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2546 OP_ALG_AAI_HMAC_PRECOMP,
2547 .rfc3686 = true,
2548 .geniv = true,
2552 .aead = {
2553 .base = {
2554 .cra_name = "authenc(hmac(sha224),"
2555 "rfc3686(ctr(aes)))",
2556 .cra_driver_name = "authenc-hmac-sha224-"
2557 "rfc3686-ctr-aes-caam-qi2",
2558 .cra_blocksize = 1,
2560 .setkey = aead_setkey,
2561 .setauthsize = aead_setauthsize,
2562 .encrypt = aead_encrypt,
2563 .decrypt = aead_decrypt,
2564 .ivsize = CTR_RFC3686_IV_SIZE,
2565 .maxauthsize = SHA224_DIGEST_SIZE,
2567 .caam = {
2568 .class1_alg_type = OP_ALG_ALGSEL_AES |
2569 OP_ALG_AAI_CTR_MOD128,
2570 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2571 OP_ALG_AAI_HMAC_PRECOMP,
2572 .rfc3686 = true,
2576 .aead = {
2577 .base = {
2578 .cra_name = "seqiv(authenc("
2579 "hmac(sha224),rfc3686(ctr(aes))))",
2580 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2581 "rfc3686-ctr-aes-caam-qi2",
2582 .cra_blocksize = 1,
2584 .setkey = aead_setkey,
2585 .setauthsize = aead_setauthsize,
2586 .encrypt = aead_encrypt,
2587 .decrypt = aead_decrypt,
2588 .ivsize = CTR_RFC3686_IV_SIZE,
2589 .maxauthsize = SHA224_DIGEST_SIZE,
2591 .caam = {
2592 .class1_alg_type = OP_ALG_ALGSEL_AES |
2593 OP_ALG_AAI_CTR_MOD128,
2594 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2595 OP_ALG_AAI_HMAC_PRECOMP,
2596 .rfc3686 = true,
2597 .geniv = true,
2601 .aead = {
2602 .base = {
2603 .cra_name = "authenc(hmac(sha256),"
2604 "rfc3686(ctr(aes)))",
2605 .cra_driver_name = "authenc-hmac-sha256-"
2606 "rfc3686-ctr-aes-caam-qi2",
2607 .cra_blocksize = 1,
2609 .setkey = aead_setkey,
2610 .setauthsize = aead_setauthsize,
2611 .encrypt = aead_encrypt,
2612 .decrypt = aead_decrypt,
2613 .ivsize = CTR_RFC3686_IV_SIZE,
2614 .maxauthsize = SHA256_DIGEST_SIZE,
2616 .caam = {
2617 .class1_alg_type = OP_ALG_ALGSEL_AES |
2618 OP_ALG_AAI_CTR_MOD128,
2619 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2620 OP_ALG_AAI_HMAC_PRECOMP,
2621 .rfc3686 = true,
2625 .aead = {
2626 .base = {
2627 .cra_name = "seqiv(authenc(hmac(sha256),"
2628 "rfc3686(ctr(aes))))",
2629 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2630 "rfc3686-ctr-aes-caam-qi2",
2631 .cra_blocksize = 1,
2633 .setkey = aead_setkey,
2634 .setauthsize = aead_setauthsize,
2635 .encrypt = aead_encrypt,
2636 .decrypt = aead_decrypt,
2637 .ivsize = CTR_RFC3686_IV_SIZE,
2638 .maxauthsize = SHA256_DIGEST_SIZE,
2640 .caam = {
2641 .class1_alg_type = OP_ALG_ALGSEL_AES |
2642 OP_ALG_AAI_CTR_MOD128,
2643 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2644 OP_ALG_AAI_HMAC_PRECOMP,
2645 .rfc3686 = true,
2646 .geniv = true,
2650 .aead = {
2651 .base = {
2652 .cra_name = "authenc(hmac(sha384),"
2653 "rfc3686(ctr(aes)))",
2654 .cra_driver_name = "authenc-hmac-sha384-"
2655 "rfc3686-ctr-aes-caam-qi2",
2656 .cra_blocksize = 1,
2658 .setkey = aead_setkey,
2659 .setauthsize = aead_setauthsize,
2660 .encrypt = aead_encrypt,
2661 .decrypt = aead_decrypt,
2662 .ivsize = CTR_RFC3686_IV_SIZE,
2663 .maxauthsize = SHA384_DIGEST_SIZE,
2665 .caam = {
2666 .class1_alg_type = OP_ALG_ALGSEL_AES |
2667 OP_ALG_AAI_CTR_MOD128,
2668 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2669 OP_ALG_AAI_HMAC_PRECOMP,
2670 .rfc3686 = true,
2674 .aead = {
2675 .base = {
2676 .cra_name = "seqiv(authenc(hmac(sha384),"
2677 "rfc3686(ctr(aes))))",
2678 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2679 "rfc3686-ctr-aes-caam-qi2",
2680 .cra_blocksize = 1,
2682 .setkey = aead_setkey,
2683 .setauthsize = aead_setauthsize,
2684 .encrypt = aead_encrypt,
2685 .decrypt = aead_decrypt,
2686 .ivsize = CTR_RFC3686_IV_SIZE,
2687 .maxauthsize = SHA384_DIGEST_SIZE,
2689 .caam = {
2690 .class1_alg_type = OP_ALG_ALGSEL_AES |
2691 OP_ALG_AAI_CTR_MOD128,
2692 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2693 OP_ALG_AAI_HMAC_PRECOMP,
2694 .rfc3686 = true,
2695 .geniv = true,
2699 .aead = {
2700 .base = {
2701 .cra_name = "rfc7539(chacha20,poly1305)",
2702 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2703 "caam-qi2",
2704 .cra_blocksize = 1,
2706 .setkey = chachapoly_setkey,
2707 .setauthsize = chachapoly_setauthsize,
2708 .encrypt = aead_encrypt,
2709 .decrypt = aead_decrypt,
2710 .ivsize = CHACHAPOLY_IV_SIZE,
2711 .maxauthsize = POLY1305_DIGEST_SIZE,
2713 .caam = {
2714 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2715 OP_ALG_AAI_AEAD,
2716 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2717 OP_ALG_AAI_AEAD,
2721 .aead = {
2722 .base = {
2723 .cra_name = "rfc7539esp(chacha20,poly1305)",
2724 .cra_driver_name = "rfc7539esp-chacha20-"
2725 "poly1305-caam-qi2",
2726 .cra_blocksize = 1,
2728 .setkey = chachapoly_setkey,
2729 .setauthsize = chachapoly_setauthsize,
2730 .encrypt = aead_encrypt,
2731 .decrypt = aead_decrypt,
2732 .ivsize = 8,
2733 .maxauthsize = POLY1305_DIGEST_SIZE,
2735 .caam = {
2736 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2737 OP_ALG_AAI_AEAD,
2738 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2739 OP_ALG_AAI_AEAD,
2743 .aead = {
2744 .base = {
2745 .cra_name = "authenc(hmac(sha512),"
2746 "rfc3686(ctr(aes)))",
2747 .cra_driver_name = "authenc-hmac-sha512-"
2748 "rfc3686-ctr-aes-caam-qi2",
2749 .cra_blocksize = 1,
2751 .setkey = aead_setkey,
2752 .setauthsize = aead_setauthsize,
2753 .encrypt = aead_encrypt,
2754 .decrypt = aead_decrypt,
2755 .ivsize = CTR_RFC3686_IV_SIZE,
2756 .maxauthsize = SHA512_DIGEST_SIZE,
2758 .caam = {
2759 .class1_alg_type = OP_ALG_ALGSEL_AES |
2760 OP_ALG_AAI_CTR_MOD128,
2761 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2762 OP_ALG_AAI_HMAC_PRECOMP,
2763 .rfc3686 = true,
2767 .aead = {
2768 .base = {
2769 .cra_name = "seqiv(authenc(hmac(sha512),"
2770 "rfc3686(ctr(aes))))",
2771 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2772 "rfc3686-ctr-aes-caam-qi2",
2773 .cra_blocksize = 1,
2775 .setkey = aead_setkey,
2776 .setauthsize = aead_setauthsize,
2777 .encrypt = aead_encrypt,
2778 .decrypt = aead_decrypt,
2779 .ivsize = CTR_RFC3686_IV_SIZE,
2780 .maxauthsize = SHA512_DIGEST_SIZE,
2782 .caam = {
2783 .class1_alg_type = OP_ALG_ALGSEL_AES |
2784 OP_ALG_AAI_CTR_MOD128,
2785 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2786 OP_ALG_AAI_HMAC_PRECOMP,
2787 .rfc3686 = true,
2788 .geniv = true,
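/*
 * Illustrative only, not part of this driver: once the templates above are
 * registered, a kernel user reaches them through the generic AEAD API.
 * A minimal sketch (error handling omitted; "key", "req", "src", "dst",
 * "iv", etc. are placeholders set up by the caller; the authenc key blob
 * encodes both the authentication and the cipher key):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, authsize);
 *	aead_request_set_crypt(req, src, dst, cryptlen, iv);
 *	aead_request_set_ad(req, assoclen);
 *	crypto_aead_encrypt(req);
 */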
2793 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2795 struct skcipher_alg *alg = &t_alg->skcipher;
2797 alg->base.cra_module = THIS_MODULE;
2798 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2799 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2800 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2802 alg->init = caam_cra_init_skcipher;
2803 alg->exit = caam_cra_exit;
2806 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2808 struct aead_alg *alg = &t_alg->aead;
2810 alg->base.cra_module = THIS_MODULE;
2811 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2812 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2813 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2815 alg->init = caam_cra_init_aead;
2816 alg->exit = caam_cra_exit_aead;
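/*
 * The definitions below implement the ahash (asynchronous hash) backend.
 * Illustrative only, not part of this driver: a kernel user reaches these
 * implementations through the generic ahash API. A minimal sketch (error
 * handling omitted; "key", "cb", "cb_data", "src_sgl" and "digest" are
 * placeholders set up by the caller):
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, cb, cb_data);
 *	ahash_request_set_crypt(req, src_sgl, digest, nbytes);
 *	crypto_ahash_digest(req);	(or init/update/final for streaming)
 */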
2819 /* max hash key is max split key size */
2820 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
2822 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
2824 /* caam context sizes for hashes: running digest + 8 */
2825 #define HASH_MSG_LEN 8
2826 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
2828 enum hash_optype {
2829 UPDATE = 0,
2830 UPDATE_FIRST,
2831 FINALIZE,
2832 DIGEST,
2833 HASH_NUM_OP
2837 * caam_hash_ctx - ahash per-session context
2838 * @flc: Flow Contexts array
2839 * @flc_dma: I/O virtual addresses of the Flow Contexts
2840 * @dev: dpseci device
2841 * @ctx_len: size of Context Register
2842 * @adata: hashing algorithm details
2844 struct caam_hash_ctx {
2845 struct caam_flc flc[HASH_NUM_OP];
2846 dma_addr_t flc_dma[HASH_NUM_OP];
2847 struct device *dev;
2848 int ctx_len;
2849 struct alginfo adata;
2852 /* ahash state */
2853 struct caam_hash_state {
2854 struct caam_request caam_req;
2855 dma_addr_t buf_dma;
2856 dma_addr_t ctx_dma;
2857 int ctx_dma_len;
2858 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2859 int buflen_0;
2860 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2861 int buflen_1;
2862 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
2863 int (*update)(struct ahash_request *req);
2864 int (*final)(struct ahash_request *req);
2865 int (*finup)(struct ahash_request *req);
2866 int current_buf;
2869 struct caam_export_state {
2870 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
2871 u8 caam_ctx[MAX_CTX_LEN];
2872 int buflen;
2873 int (*update)(struct ahash_request *req);
2874 int (*final)(struct ahash_request *req);
2875 int (*finup)(struct ahash_request *req);
2878 static inline void switch_buf(struct caam_hash_state *state)
2880 state->current_buf ^= 1;
2883 static inline u8 *current_buf(struct caam_hash_state *state)
2885 return state->current_buf ? state->buf_1 : state->buf_0;
2888 static inline u8 *alt_buf(struct caam_hash_state *state)
2890 return state->current_buf ? state->buf_0 : state->buf_1;
2893 static inline int *current_buflen(struct caam_hash_state *state)
2895 return state->current_buf ? &state->buflen_1 : &state->buflen_0;
2898 static inline int *alt_buflen(struct caam_hash_state *state)
2900 return state->current_buf ? &state->buflen_0 : &state->buflen_1;
2903 /* Map current buffer in state (if length > 0) and put it in link table */
2904 static inline int buf_map_to_qm_sg(struct device *dev,
2905 struct dpaa2_sg_entry *qm_sg,
2906 struct caam_hash_state *state)
2908 int buflen = *current_buflen(state);
2910 if (!buflen)
2911 return 0;
2913 state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
2914 DMA_TO_DEVICE);
2915 if (dma_mapping_error(dev, state->buf_dma)) {
2916 dev_err(dev, "unable to map buf\n");
2917 state->buf_dma = 0;
2918 return -ENOMEM;
2921 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
2923 return 0;
2926 /* Map state->caam_ctx, and add it to link table */
2927 static inline int ctx_map_to_qm_sg(struct device *dev,
2928 struct caam_hash_state *state, int ctx_len,
2929 struct dpaa2_sg_entry *qm_sg, u32 flag)
2931 state->ctx_dma_len = ctx_len;
2932 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
2933 if (dma_mapping_error(dev, state->ctx_dma)) {
2934 dev_err(dev, "unable to map ctx\n");
2935 state->ctx_dma = 0;
2936 return -ENOMEM;
2939 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
2941 return 0;
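/*
 * Build the four ahash shared descriptors (update, update_first, finalize,
 * digest) for the current algorithm/key and sync them for device access.
 */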
2944 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
2946 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
2947 int digestsize = crypto_ahash_digestsize(ahash);
2948 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
2949 struct caam_flc *flc;
2950 u32 *desc;
2952 /* ahash_update shared descriptor */
2953 flc = &ctx->flc[UPDATE];
2954 desc = flc->sh_desc;
2955 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
2956 ctx->ctx_len, true, priv->sec_attr.era);
2957 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2958 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
2959 desc_bytes(desc), DMA_BIDIRECTIONAL);
2960 print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
2961 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2964 /* ahash_update_first shared descriptor */
2965 flc = &ctx->flc[UPDATE_FIRST];
2966 desc = flc->sh_desc;
2967 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
2968 ctx->ctx_len, false, priv->sec_attr.era);
2969 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2970 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
2971 desc_bytes(desc), DMA_BIDIRECTIONAL);
2972 print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
2973 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2976 /* ahash_final shared descriptor */
2977 flc = &ctx->flc[FINALIZE];
2978 desc = flc->sh_desc;
2979 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
2980 ctx->ctx_len, true, priv->sec_attr.era);
2981 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2982 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
2983 desc_bytes(desc), DMA_BIDIRECTIONAL);
2984 print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
2985 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2988 /* ahash_digest shared descriptor */
2989 flc = &ctx->flc[DIGEST];
2990 desc = flc->sh_desc;
2991 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
2992 ctx->ctx_len, false, priv->sec_attr.era);
2993 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2994 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
2995 desc_bytes(desc), DMA_BIDIRECTIONAL);
2996 print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
2997 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3000 return 0;
3003 struct split_key_sh_result {
3004 struct completion completion;
3005 int err;
3006 struct device *dev;
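/* Completion callback for the key-digest job issued by hash_digest_key() */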
3009 static void split_key_sh_done(void *cbk_ctx, u32 err)
3011 struct split_key_sh_result *res = cbk_ctx;
3013 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3015 if (err)
3016 caam_qi2_strstatus(res->dev, err);
3018 res->err = err;
3019 complete(&res->completion);
3022 /* Digest the key if it is too long, shrinking it to digestsize bytes */
3023 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3024 u32 digestsize)
3026 struct caam_request *req_ctx;
3027 u32 *desc;
3028 struct split_key_sh_result result;
3029 dma_addr_t key_dma;
3030 struct caam_flc *flc;
3031 dma_addr_t flc_dma;
3032 int ret = -ENOMEM;
3033 struct dpaa2_fl_entry *in_fle, *out_fle;
3035 req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3036 if (!req_ctx)
3037 return -ENOMEM;
3039 in_fle = &req_ctx->fd_flt[1];
3040 out_fle = &req_ctx->fd_flt[0];
3042 flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3043 if (!flc)
3044 goto err_flc;
3046 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3047 if (dma_mapping_error(ctx->dev, key_dma)) {
3048 dev_err(ctx->dev, "unable to map key memory\n");
3049 goto err_key_dma;
3052 desc = flc->sh_desc;
3054 init_sh_desc(desc, 0);
3056 /* descriptor to perform unkeyed hash on key_in */
3057 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3058 OP_ALG_AS_INITFINAL);
3059 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3060 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3061 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3062 LDST_SRCDST_BYTE_CONTEXT);
3064 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3065 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3066 desc_bytes(desc), DMA_TO_DEVICE);
3067 if (dma_mapping_error(ctx->dev, flc_dma)) {
3068 dev_err(ctx->dev, "unable to map shared descriptor\n");
3069 goto err_flc_dma;
3072 dpaa2_fl_set_final(in_fle, true);
3073 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3074 dpaa2_fl_set_addr(in_fle, key_dma);
3075 dpaa2_fl_set_len(in_fle, *keylen);
3076 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3077 dpaa2_fl_set_addr(out_fle, key_dma);
3078 dpaa2_fl_set_len(out_fle, digestsize);
3080 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3081 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3082 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3083 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3086 result.err = 0;
3087 init_completion(&result.completion);
3088 result.dev = ctx->dev;
3090 req_ctx->flc = flc;
3091 req_ctx->flc_dma = flc_dma;
3092 req_ctx->cbk = split_key_sh_done;
3093 req_ctx->ctx = &result;
3095 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3096 if (ret == -EINPROGRESS) {
3097 /* in progress */
3098 wait_for_completion(&result.completion);
3099 ret = result.err;
3100 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3101 DUMP_PREFIX_ADDRESS, 16, 4, key,
3102 digestsize, 1);
3105 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3106 DMA_TO_DEVICE);
3107 err_flc_dma:
3108 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3109 err_key_dma:
3110 kfree(flc);
3111 err_flc:
3112 kfree(req_ctx);
3114 *keylen = digestsize;
3116 return ret;
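/*
 * hmac setkey: keys longer than the underlying block size are first hashed
 * down to digestsize, then kept inline for shared descriptor construction.
 */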
3119 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3120 unsigned int keylen)
3122 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3123 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3124 unsigned int digestsize = crypto_ahash_digestsize(ahash);
3125 int ret;
3126 u8 *hashed_key = NULL;
3128 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3130 if (keylen > blocksize) {
3131 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3132 if (!hashed_key)
3133 return -ENOMEM;
3134 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3135 if (ret)
3136 goto bad_free_key;
3137 key = hashed_key;
3140 ctx->adata.keylen = keylen;
3141 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3142 OP_ALG_ALGSEL_MASK);
3143 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3144 goto bad_free_key;
3146 ctx->adata.key_virt = key;
3147 ctx->adata.key_inline = true;
3149 ret = ahash_set_sh_desc(ahash);
3150 kfree(hashed_key);
3151 return ret;
3152 bad_free_key:
3153 kfree(hashed_key);
3154 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
3155 return -EINVAL;
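/*
 * DMA-unmap the source scatterlist, S/G table and buffer of a completed
 * request; ahash_unmap_ctx() below also unmaps the running context.
 */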
3158 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3159 struct ahash_request *req)
3161 struct caam_hash_state *state = ahash_request_ctx(req);
3163 if (edesc->src_nents)
3164 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3166 if (edesc->qm_sg_bytes)
3167 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3168 DMA_TO_DEVICE);
3170 if (state->buf_dma) {
3171 dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
3172 DMA_TO_DEVICE);
3173 state->buf_dma = 0;
3177 static inline void ahash_unmap_ctx(struct device *dev,
3178 struct ahash_edesc *edesc,
3179 struct ahash_request *req, u32 flag)
3181 struct caam_hash_state *state = ahash_request_ctx(req);
3183 if (state->ctx_dma) {
3184 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3185 state->ctx_dma = 0;
3187 ahash_unmap(dev, edesc, req);
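/*
 * Completion callback for requests whose output is the final digest and
 * which keep no running context: copy it from caam_ctx to req->result.
 */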
3190 static void ahash_done(void *cbk_ctx, u32 status)
3192 struct crypto_async_request *areq = cbk_ctx;
3193 struct ahash_request *req = ahash_request_cast(areq);
3194 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3195 struct caam_hash_state *state = ahash_request_ctx(req);
3196 struct ahash_edesc *edesc = state->caam_req.edesc;
3197 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3198 int digestsize = crypto_ahash_digestsize(ahash);
3199 int ecode = 0;
3201 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3203 if (unlikely(status)) {
3204 caam_qi2_strstatus(ctx->dev, status);
3205 ecode = -EIO;
3208 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3209 memcpy(req->result, state->caam_ctx, digestsize);
3210 qi_cache_free(edesc);
3212 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3213 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3214 ctx->ctx_len, 1);
3216 req->base.complete(&req->base, ecode);
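/*
 * Completion callback for updates that both read and write the running
 * context (DMA_BIDIRECTIONAL); flips the 0/1 data buffers for the next call.
 */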
3219 static void ahash_done_bi(void *cbk_ctx, u32 status)
3221 struct crypto_async_request *areq = cbk_ctx;
3222 struct ahash_request *req = ahash_request_cast(areq);
3223 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3224 struct caam_hash_state *state = ahash_request_ctx(req);
3225 struct ahash_edesc *edesc = state->caam_req.edesc;
3226 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3227 int ecode = 0;
3229 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3231 if (unlikely(status)) {
3232 caam_qi2_strstatus(ctx->dev, status);
3233 ecode = -EIO;
3236 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3237 switch_buf(state);
3238 qi_cache_free(edesc);
3240 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3241 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3242 ctx->ctx_len, 1);
3243 if (req->result)
3244 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3245 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3246 crypto_ahash_digestsize(ahash), 1);
3248 req->base.complete(&req->base, ecode);
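/*
 * Completion callback for final/finup with a running context: the digest
 * lands in caam_ctx and is copied out to req->result.
 */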
3251 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3253 struct crypto_async_request *areq = cbk_ctx;
3254 struct ahash_request *req = ahash_request_cast(areq);
3255 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3256 struct caam_hash_state *state = ahash_request_ctx(req);
3257 struct ahash_edesc *edesc = state->caam_req.edesc;
3258 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3259 int digestsize = crypto_ahash_digestsize(ahash);
3260 int ecode = 0;
3262 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3264 if (unlikely(status)) {
3265 caam_qi2_strstatus(ctx->dev, status);
3266 ecode = -EIO;
3269 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3270 memcpy(req->result, state->caam_ctx, digestsize);
3271 qi_cache_free(edesc);
3273 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3274 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3275 ctx->ctx_len, 1);
3277 req->base.complete(&req->base, ecode);
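/*
 * Completion callback for the first update of a stream: the newly created
 * running context is only written by the engine, hence DMA_FROM_DEVICE.
 */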
3280 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3282 struct crypto_async_request *areq = cbk_ctx;
3283 struct ahash_request *req = ahash_request_cast(areq);
3284 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3285 struct caam_hash_state *state = ahash_request_ctx(req);
3286 struct ahash_edesc *edesc = state->caam_req.edesc;
3287 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3288 int ecode = 0;
3290 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3292 if (unlikely(status)) {
3293 caam_qi2_strstatus(ctx->dev, status);
3294 ecode = -EIO;
3297 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3298 switch_buf(state);
3299 qi_cache_free(edesc);
3301 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3302 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3303 ctx->ctx_len, 1);
3304 if (req->result)
3305 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3306 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3307 crypto_ahash_digestsize(ahash), 1);
3309 req->base.complete(&req->base, ecode);
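/*
 * .update once a running context exists: hash whole blocks through the
 * engine and carry any remainder over in the state's 0/1 buffers.
 */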
3312 static int ahash_update_ctx(struct ahash_request *req)
3314 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3315 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3316 struct caam_hash_state *state = ahash_request_ctx(req);
3317 struct caam_request *req_ctx = &state->caam_req;
3318 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3319 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3320 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3321 GFP_KERNEL : GFP_ATOMIC;
3322 u8 *buf = current_buf(state);
3323 int *buflen = current_buflen(state);
3324 u8 *next_buf = alt_buf(state);
3325 int *next_buflen = alt_buflen(state), last_buflen;
3326 int in_len = *buflen + req->nbytes, to_hash;
3327 int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3328 struct ahash_edesc *edesc;
3329 int ret = 0;
3331 last_buflen = *next_buflen;
3332 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3333 to_hash = in_len - *next_buflen;
3335 if (to_hash) {
3336 struct dpaa2_sg_entry *sg_table;
3338 src_nents = sg_nents_for_len(req->src,
3339 req->nbytes - (*next_buflen));
3340 if (src_nents < 0) {
3341 dev_err(ctx->dev, "Invalid number of src SG.\n");
3342 return src_nents;
3345 if (src_nents) {
3346 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3347 DMA_TO_DEVICE);
3348 if (!mapped_nents) {
3349 dev_err(ctx->dev, "unable to DMA map source\n");
3350 return -ENOMEM;
3352 } else {
3353 mapped_nents = 0;
3356 /* allocate space for base edesc and link tables */
3357 edesc = qi_cache_zalloc(GFP_DMA | flags);
3358 if (!edesc) {
3359 dma_unmap_sg(ctx->dev, req->src, src_nents,
3360 DMA_TO_DEVICE);
3361 return -ENOMEM;
3364 edesc->src_nents = src_nents;
3365 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3366 qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
3367 sizeof(*sg_table);
3368 sg_table = &edesc->sgt[0];
3370 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3371 DMA_BIDIRECTIONAL);
3372 if (ret)
3373 goto unmap_ctx;
3375 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3376 if (ret)
3377 goto unmap_ctx;
3379 if (mapped_nents) {
3380 sg_to_qm_sg_last(req->src, mapped_nents,
3381 sg_table + qm_sg_src_index, 0);
3382 if (*next_buflen)
3383 scatterwalk_map_and_copy(next_buf, req->src,
3384 to_hash - *buflen,
3385 *next_buflen, 0);
3386 } else {
3387 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3388 true);
3391 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3392 qm_sg_bytes, DMA_TO_DEVICE);
3393 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3394 dev_err(ctx->dev, "unable to map S/G table\n");
3395 ret = -ENOMEM;
3396 goto unmap_ctx;
3398 edesc->qm_sg_bytes = qm_sg_bytes;
3400 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3401 dpaa2_fl_set_final(in_fle, true);
3402 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3403 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3404 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3405 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3406 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3407 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3409 req_ctx->flc = &ctx->flc[UPDATE];
3410 req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3411 req_ctx->cbk = ahash_done_bi;
3412 req_ctx->ctx = &req->base;
3413 req_ctx->edesc = edesc;
3415 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3416 if (ret != -EINPROGRESS &&
3417 !(ret == -EBUSY &&
3418 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3419 goto unmap_ctx;
3420 } else if (*next_buflen) {
3421 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3422 req->nbytes, 0);
3423 *buflen = *next_buflen;
3424 *next_buflen = last_buflen;
3427 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3428 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3429 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3430 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3433 return ret;
3434 unmap_ctx:
3435 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3436 qi_cache_free(edesc);
3437 return ret;
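/* .final with a running context: hash the buffered remainder, emit the digest */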
3440 static int ahash_final_ctx(struct ahash_request *req)
3442 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3443 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3444 struct caam_hash_state *state = ahash_request_ctx(req);
3445 struct caam_request *req_ctx = &state->caam_req;
3446 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3447 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3448 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3449 GFP_KERNEL : GFP_ATOMIC;
3450 int buflen = *current_buflen(state);
3451 int qm_sg_bytes, qm_sg_src_index;
3452 int digestsize = crypto_ahash_digestsize(ahash);
3453 struct ahash_edesc *edesc;
3454 struct dpaa2_sg_entry *sg_table;
3455 int ret;
3457 /* allocate space for base edesc and link tables */
3458 edesc = qi_cache_zalloc(GFP_DMA | flags);
3459 if (!edesc)
3460 return -ENOMEM;
3462 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3463 qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
3464 sg_table = &edesc->sgt[0];
3466 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3467 DMA_BIDIRECTIONAL);
3468 if (ret)
3469 goto unmap_ctx;
3471 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3472 if (ret)
3473 goto unmap_ctx;
3475 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
3477 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3478 DMA_TO_DEVICE);
3479 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3480 dev_err(ctx->dev, "unable to map S/G table\n");
3481 ret = -ENOMEM;
3482 goto unmap_ctx;
3484 edesc->qm_sg_bytes = qm_sg_bytes;
3486 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3487 dpaa2_fl_set_final(in_fle, true);
3488 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3489 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3490 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3491 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3492 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3493 dpaa2_fl_set_len(out_fle, digestsize);
3495 req_ctx->flc = &ctx->flc[FINALIZE];
3496 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3497 req_ctx->cbk = ahash_done_ctx_src;
3498 req_ctx->ctx = &req->base;
3499 req_ctx->edesc = edesc;
3501 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3502 if (ret == -EINPROGRESS ||
3503 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3504 return ret;
3506 unmap_ctx:
3507 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3508 qi_cache_free(edesc);
3509 return ret;
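/* .finup with a running context: hash buffered remainder + req->src, emit the digest */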
3512 static int ahash_finup_ctx(struct ahash_request *req)
3514 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3515 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3516 struct caam_hash_state *state = ahash_request_ctx(req);
3517 struct caam_request *req_ctx = &state->caam_req;
3518 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3519 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3520 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3521 GFP_KERNEL : GFP_ATOMIC;
3522 int buflen = *current_buflen(state);
3523 int qm_sg_bytes, qm_sg_src_index;
3524 int src_nents, mapped_nents;
3525 int digestsize = crypto_ahash_digestsize(ahash);
3526 struct ahash_edesc *edesc;
3527 struct dpaa2_sg_entry *sg_table;
3528 int ret;
3530 src_nents = sg_nents_for_len(req->src, req->nbytes);
3531 if (src_nents < 0) {
3532 dev_err(ctx->dev, "Invalid number of src SG.\n");
3533 return src_nents;
3536 if (src_nents) {
3537 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3538 DMA_TO_DEVICE);
3539 if (!mapped_nents) {
3540 dev_err(ctx->dev, "unable to DMA map source\n");
3541 return -ENOMEM;
3543 } else {
3544 mapped_nents = 0;
3547 /* allocate space for base edesc and link tables */
3548 edesc = qi_cache_zalloc(GFP_DMA | flags);
3549 if (!edesc) {
3550 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3551 return -ENOMEM;
3554 edesc->src_nents = src_nents;
3555 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3556 qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
3557 sg_table = &edesc->sgt[0];
3559 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3560 DMA_BIDIRECTIONAL);
3561 if (ret)
3562 goto unmap_ctx;
3564 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3565 if (ret)
3566 goto unmap_ctx;
3568 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
3570 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3571 DMA_TO_DEVICE);
3572 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3573 dev_err(ctx->dev, "unable to map S/G table\n");
3574 ret = -ENOMEM;
3575 goto unmap_ctx;
3577 edesc->qm_sg_bytes = qm_sg_bytes;
3579 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3580 dpaa2_fl_set_final(in_fle, true);
3581 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3582 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3583 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3584 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3585 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3586 dpaa2_fl_set_len(out_fle, digestsize);
3588 req_ctx->flc = &ctx->flc[FINALIZE];
3589 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3590 req_ctx->cbk = ahash_done_ctx_src;
3591 req_ctx->ctx = &req->base;
3592 req_ctx->edesc = edesc;
3594 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3595 if (ret == -EINPROGRESS ||
3596 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3597 return ret;
3599 unmap_ctx:
3600 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3601 qi_cache_free(edesc);
3602 return ret;
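/* One-shot digest of req->src using the INITFINAL shared descriptor */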
3605 static int ahash_digest(struct ahash_request *req)
3607 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3608 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3609 struct caam_hash_state *state = ahash_request_ctx(req);
3610 struct caam_request *req_ctx = &state->caam_req;
3611 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3612 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3613 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3614 GFP_KERNEL : GFP_ATOMIC;
3615 int digestsize = crypto_ahash_digestsize(ahash);
3616 int src_nents, mapped_nents;
3617 struct ahash_edesc *edesc;
3618 int ret = -ENOMEM;
3620 state->buf_dma = 0;
3622 src_nents = sg_nents_for_len(req->src, req->nbytes);
3623 if (src_nents < 0) {
3624 dev_err(ctx->dev, "Invalid number of src SG.\n");
3625 return src_nents;
3628 if (src_nents) {
3629 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3630 DMA_TO_DEVICE);
3631 if (!mapped_nents) {
3632 dev_err(ctx->dev, "unable to map source for DMA\n");
3633 return ret;
3635 } else {
3636 mapped_nents = 0;
3639 /* allocate space for base edesc and link tables */
3640 edesc = qi_cache_zalloc(GFP_DMA | flags);
3641 if (!edesc) {
3642 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3643 return ret;
3646 edesc->src_nents = src_nents;
3647 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3649 if (mapped_nents > 1) {
3650 int qm_sg_bytes;
3651 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3653 qm_sg_bytes = mapped_nents * sizeof(*sg_table);
3654 sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
3655 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3656 qm_sg_bytes, DMA_TO_DEVICE);
3657 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3658 dev_err(ctx->dev, "unable to map S/G table\n");
3659 goto unmap;
3661 edesc->qm_sg_bytes = qm_sg_bytes;
3662 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3663 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3664 } else {
3665 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3666 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3669 state->ctx_dma_len = digestsize;
3670 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3671 DMA_FROM_DEVICE);
3672 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3673 dev_err(ctx->dev, "unable to map ctx\n");
3674 state->ctx_dma = 0;
3675 goto unmap;
3678 dpaa2_fl_set_final(in_fle, true);
3679 dpaa2_fl_set_len(in_fle, req->nbytes);
3680 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3681 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3682 dpaa2_fl_set_len(out_fle, digestsize);
3684 req_ctx->flc = &ctx->flc[DIGEST];
3685 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3686 req_ctx->cbk = ahash_done;
3687 req_ctx->ctx = &req->base;
3688 req_ctx->edesc = edesc;
3689 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3690 if (ret == -EINPROGRESS ||
3691 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3692 return ret;
3694 unmap:
3695 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3696 qi_cache_free(edesc);
3697 return ret;
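/* .final when nothing was ever sent to the engine: digest only the buffered data */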
3700 static int ahash_final_no_ctx(struct ahash_request *req)
3702 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3703 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3704 struct caam_hash_state *state = ahash_request_ctx(req);
3705 struct caam_request *req_ctx = &state->caam_req;
3706 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3707 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3708 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3709 GFP_KERNEL : GFP_ATOMIC;
3710 u8 *buf = current_buf(state);
3711 int buflen = *current_buflen(state);
3712 int digestsize = crypto_ahash_digestsize(ahash);
3713 struct ahash_edesc *edesc;
3714 int ret = -ENOMEM;
3716 /* allocate space for base edesc and link tables */
3717 edesc = qi_cache_zalloc(GFP_DMA | flags);
3718 if (!edesc)
3719 return ret;
3721 if (buflen) {
3722 state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3723 DMA_TO_DEVICE);
3724 if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3725 dev_err(ctx->dev, "unable to map src\n");
3726 goto unmap;
3730 state->ctx_dma_len = digestsize;
3731 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3732 DMA_FROM_DEVICE);
3733 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3734 dev_err(ctx->dev, "unable to map ctx\n");
3735 state->ctx_dma = 0;
3736 goto unmap;
3739 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3740 dpaa2_fl_set_final(in_fle, true);
3742 * crypto engine requires the input entry to be present when
3743 * "frame list" FD is used.
3744 * Since engine does not support FMT=2'b11 (unused entry type), leaving
3745 * in_fle zeroized (except for "Final" flag) is the best option.
3747 if (buflen) {
3748 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3749 dpaa2_fl_set_addr(in_fle, state->buf_dma);
3750 dpaa2_fl_set_len(in_fle, buflen);
3752 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3753 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3754 dpaa2_fl_set_len(out_fle, digestsize);
3756 req_ctx->flc = &ctx->flc[DIGEST];
3757 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3758 req_ctx->cbk = ahash_done;
3759 req_ctx->ctx = &req->base;
3760 req_ctx->edesc = edesc;
3762 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3763 if (ret == -EINPROGRESS ||
3764 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3765 return ret;
3767 unmap:
3768 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3769 qi_cache_free(edesc);
3770 return ret;
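/*
 * .update before any data has hit the engine: once at least a full block is
 * available, start the running context with the UPDATE_FIRST descriptor and
 * switch the state over to the _ctx handlers.
 */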
3773 static int ahash_update_no_ctx(struct ahash_request *req)
3775 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3776 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3777 struct caam_hash_state *state = ahash_request_ctx(req);
3778 struct caam_request *req_ctx = &state->caam_req;
3779 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3780 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3781 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3782 GFP_KERNEL : GFP_ATOMIC;
3783 u8 *buf = current_buf(state);
3784 int *buflen = current_buflen(state);
3785 u8 *next_buf = alt_buf(state);
3786 int *next_buflen = alt_buflen(state);
3787 int in_len = *buflen + req->nbytes, to_hash;
3788 int qm_sg_bytes, src_nents, mapped_nents;
3789 struct ahash_edesc *edesc;
3790 int ret = 0;
3792 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3793 to_hash = in_len - *next_buflen;
3795 if (to_hash) {
3796 struct dpaa2_sg_entry *sg_table;
3798 src_nents = sg_nents_for_len(req->src,
3799 req->nbytes - *next_buflen);
3800 if (src_nents < 0) {
3801 dev_err(ctx->dev, "Invalid number of src SG.\n");
3802 return src_nents;
3805 if (src_nents) {
3806 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3807 DMA_TO_DEVICE);
3808 if (!mapped_nents) {
3809 dev_err(ctx->dev, "unable to DMA map source\n");
3810 return -ENOMEM;
3812 } else {
3813 mapped_nents = 0;
3816 /* allocate space for base edesc and link tables */
3817 edesc = qi_cache_zalloc(GFP_DMA | flags);
3818 if (!edesc) {
3819 dma_unmap_sg(ctx->dev, req->src, src_nents,
3820 DMA_TO_DEVICE);
3821 return -ENOMEM;
3824 edesc->src_nents = src_nents;
3825 qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
3826 sg_table = &edesc->sgt[0];
3828 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3829 if (ret)
3830 goto unmap_ctx;
3832 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3834 if (*next_buflen)
3835 scatterwalk_map_and_copy(next_buf, req->src,
3836 to_hash - *buflen,
3837 *next_buflen, 0);
3839 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3840 qm_sg_bytes, DMA_TO_DEVICE);
3841 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3842 dev_err(ctx->dev, "unable to map S/G table\n");
3843 ret = -ENOMEM;
3844 goto unmap_ctx;
3846 edesc->qm_sg_bytes = qm_sg_bytes;
3848 state->ctx_dma_len = ctx->ctx_len;
3849 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3850 ctx->ctx_len, DMA_FROM_DEVICE);
3851 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3852 dev_err(ctx->dev, "unable to map ctx\n");
3853 state->ctx_dma = 0;
3854 ret = -ENOMEM;
3855 goto unmap_ctx;
3858 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3859 dpaa2_fl_set_final(in_fle, true);
3860 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3861 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3862 dpaa2_fl_set_len(in_fle, to_hash);
3863 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3864 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3865 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3867 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3868 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3869 req_ctx->cbk = ahash_done_ctx_dst;
3870 req_ctx->ctx = &req->base;
3871 req_ctx->edesc = edesc;
3873 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3874 if (ret != -EINPROGRESS &&
3875 !(ret == -EBUSY &&
3876 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3877 goto unmap_ctx;
3879 state->update = ahash_update_ctx;
3880 state->finup = ahash_finup_ctx;
3881 state->final = ahash_final_ctx;
3882 } else if (*next_buflen) {
3883 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3884 req->nbytes, 0);
3885 *buflen = *next_buflen;
3886 *next_buflen = 0;
3889 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3890 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3891 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3892 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3895 return ret;
3896 unmap_ctx:
3897 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
3898 qi_cache_free(edesc);
3899 return ret;
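/* .finup with no running context: digest buffered data + req->src in one pass */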
3902 static int ahash_finup_no_ctx(struct ahash_request *req)
3904 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3905 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3906 struct caam_hash_state *state = ahash_request_ctx(req);
3907 struct caam_request *req_ctx = &state->caam_req;
3908 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3909 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3910 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3911 GFP_KERNEL : GFP_ATOMIC;
3912 int buflen = *current_buflen(state);
3913 int qm_sg_bytes, src_nents, mapped_nents;
3914 int digestsize = crypto_ahash_digestsize(ahash);
3915 struct ahash_edesc *edesc;
3916 struct dpaa2_sg_entry *sg_table;
3917 int ret;
3919 src_nents = sg_nents_for_len(req->src, req->nbytes);
3920 if (src_nents < 0) {
3921 dev_err(ctx->dev, "Invalid number of src SG.\n");
3922 return src_nents;
3925 if (src_nents) {
3926 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3927 DMA_TO_DEVICE);
3928 if (!mapped_nents) {
3929 dev_err(ctx->dev, "unable to DMA map source\n");
3930 return -ENOMEM;
3932 } else {
3933 mapped_nents = 0;
3936 /* allocate space for base edesc and link tables */
3937 edesc = qi_cache_zalloc(GFP_DMA | flags);
3938 if (!edesc) {
3939 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3940 return -ENOMEM;
3943 edesc->src_nents = src_nents;
3944 qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
3945 sg_table = &edesc->sgt[0];
3947 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3948 if (ret)
3949 goto unmap;
3951 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3953 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3954 DMA_TO_DEVICE);
3955 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3956 dev_err(ctx->dev, "unable to map S/G table\n");
3957 ret = -ENOMEM;
3958 goto unmap;
3960 edesc->qm_sg_bytes = qm_sg_bytes;
3962 state->ctx_dma_len = digestsize;
3963 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3964 DMA_FROM_DEVICE);
3965 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3966 dev_err(ctx->dev, "unable to map ctx\n");
3967 state->ctx_dma = 0;
3968 ret = -ENOMEM;
3969 goto unmap;
3972 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3973 dpaa2_fl_set_final(in_fle, true);
3974 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3975 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3976 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
3977 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3978 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3979 dpaa2_fl_set_len(out_fle, digestsize);
3981 req_ctx->flc = &ctx->flc[DIGEST];
3982 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3983 req_ctx->cbk = ahash_done;
3984 req_ctx->ctx = &req->base;
3985 req_ctx->edesc = edesc;
3986 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3987 if (ret != -EINPROGRESS &&
3988 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3989 goto unmap;
3991 return ret;
3992 unmap:
3993 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3994 qi_cache_free(edesc);
3995 return ret;
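/*
 * ahash_update_first() - first update of a request: hash all full blocks
 * with the UPDATE_FIRST flow context into state->caam_ctx, stash the
 * trailing partial block in the alternate buffer, and switch the state
 * handlers to the context-carrying (_ctx) variants on success. If there is
 * nothing to hash yet, the data is only buffered and the no-context
 * handlers are selected instead.
 */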
3998 static int ahash_update_first(struct ahash_request *req)
4000 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4001 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4002 struct caam_hash_state *state = ahash_request_ctx(req);
4003 struct caam_request *req_ctx = &state->caam_req;
4004 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4005 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4006 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4007 GFP_KERNEL : GFP_ATOMIC;
4008 u8 *next_buf = alt_buf(state);
4009 int *next_buflen = alt_buflen(state);
4010 int to_hash;
4011 int src_nents, mapped_nents;
4012 struct ahash_edesc *edesc;
4013 int ret = 0;
4015 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
4017 to_hash = req->nbytes - *next_buflen;
4019 if (to_hash) {
4020 struct dpaa2_sg_entry *sg_table;
4022 src_nents = sg_nents_for_len(req->src,
4023 req->nbytes - (*next_buflen));
4024 if (src_nents < 0) {
4025 dev_err(ctx->dev, "Invalid number of src SG.\n");
4026 return src_nents;
4029 if (src_nents) {
4030 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4031 DMA_TO_DEVICE);
4032 if (!mapped_nents) {
4033 dev_err(ctx->dev, "unable to map source for DMA\n");
4034 return -ENOMEM;
4036 } else {
4037 mapped_nents = 0;
4040 /* allocate space for base edesc and link tables */
4041 edesc = qi_cache_zalloc(GFP_DMA | flags);
4042 if (!edesc) {
4043 dma_unmap_sg(ctx->dev, req->src, src_nents,
4044 DMA_TO_DEVICE);
4045 return -ENOMEM;
4048 edesc->src_nents = src_nents;
4049 sg_table = &edesc->sgt[0];
4051 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4052 dpaa2_fl_set_final(in_fle, true);
4053 dpaa2_fl_set_len(in_fle, to_hash);
4055 if (mapped_nents > 1) {
4056 int qm_sg_bytes;
4058 sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
4059 qm_sg_bytes = mapped_nents * sizeof(*sg_table);
4060 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4061 qm_sg_bytes,
4062 DMA_TO_DEVICE);
4063 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4064 dev_err(ctx->dev, "unable to map S/G table\n");
4065 ret = -ENOMEM;
4066 goto unmap_ctx;
4068 edesc->qm_sg_bytes = qm_sg_bytes;
4069 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4070 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4071 } else {
4072 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4073 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4076 if (*next_buflen)
4077 scatterwalk_map_and_copy(next_buf, req->src, to_hash,
4078 *next_buflen, 0);
4080 state->ctx_dma_len = ctx->ctx_len;
4081 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4082 ctx->ctx_len, DMA_FROM_DEVICE);
4083 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4084 dev_err(ctx->dev, "unable to map ctx\n");
4085 state->ctx_dma = 0;
4086 ret = -ENOMEM;
4087 goto unmap_ctx;
4090 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4091 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4092 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4094 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4095 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4096 req_ctx->cbk = ahash_done_ctx_dst;
4097 req_ctx->ctx = &req->base;
4098 req_ctx->edesc = edesc;
4100 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4101 if (ret != -EINPROGRESS &&
4102 !(ret == -EBUSY && req->base.flags &
4103 CRYPTO_TFM_REQ_MAY_BACKLOG))
4104 goto unmap_ctx;
4106 state->update = ahash_update_ctx;
4107 state->finup = ahash_finup_ctx;
4108 state->final = ahash_final_ctx;
4109 } else if (*next_buflen) {
4110 state->update = ahash_update_no_ctx;
4111 state->finup = ahash_finup_no_ctx;
4112 state->final = ahash_final_no_ctx;
4113 scatterwalk_map_and_copy(next_buf, req->src, 0,
4114 req->nbytes, 0);
4115 switch_buf(state);
4118 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4119 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
4122 return ret;
4123 unmap_ctx:
4124 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4125 qi_cache_free(edesc);
4126 return ret;
4129 static int ahash_finup_first(struct ahash_request *req)
4131 return ahash_digest(req);
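/*
 * ahash_init() - reset per-request state: start with the "first"/no-context
 * handlers and clear all DMA handles and buffer lengths.
 */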
4134 static int ahash_init(struct ahash_request *req)
4136 struct caam_hash_state *state = ahash_request_ctx(req);
4138 state->update = ahash_update_first;
4139 state->finup = ahash_finup_first;
4140 state->final = ahash_final_no_ctx;
4142 state->ctx_dma = 0;
4143 state->ctx_dma_len = 0;
4144 state->current_buf = 0;
4145 state->buf_dma = 0;
4146 state->buflen_0 = 0;
4147 state->buflen_1 = 0;
4149 return 0;
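/*
 * ahash_update()/ahash_finup()/ahash_final() are thin dispatchers that call
 * whichever handler the current request state has selected.
 */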
4152 static int ahash_update(struct ahash_request *req)
4154 struct caam_hash_state *state = ahash_request_ctx(req);
4156 return state->update(req);
4159 static int ahash_finup(struct ahash_request *req)
4161 struct caam_hash_state *state = ahash_request_ctx(req);
4163 return state->finup(req);
4166 static int ahash_final(struct ahash_request *req)
4168 struct caam_hash_state *state = ahash_request_ctx(req);
4170 return state->final(req);
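/*
 * ahash_export()/ahash_import() serialize the currently active buffer, the
 * running CAAM context and the state handlers so a partially hashed request
 * can be suspended and later resumed.
 */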
4173 static int ahash_export(struct ahash_request *req, void *out)
4175 struct caam_hash_state *state = ahash_request_ctx(req);
4176 struct caam_export_state *export = out;
4177 int len;
4178 u8 *buf;
4180 if (state->current_buf) {
4181 buf = state->buf_1;
4182 len = state->buflen_1;
4183 } else {
4184 buf = state->buf_0;
4185 len = state->buflen_0;
4188 memcpy(export->buf, buf, len);
4189 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4190 export->buflen = len;
4191 export->update = state->update;
4192 export->final = state->final;
4193 export->finup = state->finup;
4195 return 0;
4198 static int ahash_import(struct ahash_request *req, const void *in)
4200 struct caam_hash_state *state = ahash_request_ctx(req);
4201 const struct caam_export_state *export = in;
4203 memset(state, 0, sizeof(*state));
4204 memcpy(state->buf_0, export->buf, export->buflen);
4205 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4206 state->buflen_0 = export->buflen;
4207 state->update = export->update;
4208 state->final = export->final;
4209 state->finup = export->finup;
4211 return 0;
4214 struct caam_hash_template {
4215 char name[CRYPTO_MAX_ALG_NAME];
4216 char driver_name[CRYPTO_MAX_ALG_NAME];
4217 char hmac_name[CRYPTO_MAX_ALG_NAME];
4218 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4219 unsigned int blocksize;
4220 struct ahash_alg template_ahash;
4221 u32 alg_type;
4224 /* ahash descriptors */
4225 static struct caam_hash_template driver_hash[] = {
4227 .name = "sha1",
4228 .driver_name = "sha1-caam-qi2",
4229 .hmac_name = "hmac(sha1)",
4230 .hmac_driver_name = "hmac-sha1-caam-qi2",
4231 .blocksize = SHA1_BLOCK_SIZE,
4232 .template_ahash = {
4233 .init = ahash_init,
4234 .update = ahash_update,
4235 .final = ahash_final,
4236 .finup = ahash_finup,
4237 .digest = ahash_digest,
4238 .export = ahash_export,
4239 .import = ahash_import,
4240 .setkey = ahash_setkey,
4241 .halg = {
4242 .digestsize = SHA1_DIGEST_SIZE,
4243 .statesize = sizeof(struct caam_export_state),
4246 .alg_type = OP_ALG_ALGSEL_SHA1,
4247 }, {
4248 .name = "sha224",
4249 .driver_name = "sha224-caam-qi2",
4250 .hmac_name = "hmac(sha224)",
4251 .hmac_driver_name = "hmac-sha224-caam-qi2",
4252 .blocksize = SHA224_BLOCK_SIZE,
4253 .template_ahash = {
4254 .init = ahash_init,
4255 .update = ahash_update,
4256 .final = ahash_final,
4257 .finup = ahash_finup,
4258 .digest = ahash_digest,
4259 .export = ahash_export,
4260 .import = ahash_import,
4261 .setkey = ahash_setkey,
4262 .halg = {
4263 .digestsize = SHA224_DIGEST_SIZE,
4264 .statesize = sizeof(struct caam_export_state),
4267 .alg_type = OP_ALG_ALGSEL_SHA224,
4268 }, {
4269 .name = "sha256",
4270 .driver_name = "sha256-caam-qi2",
4271 .hmac_name = "hmac(sha256)",
4272 .hmac_driver_name = "hmac-sha256-caam-qi2",
4273 .blocksize = SHA256_BLOCK_SIZE,
4274 .template_ahash = {
4275 .init = ahash_init,
4276 .update = ahash_update,
4277 .final = ahash_final,
4278 .finup = ahash_finup,
4279 .digest = ahash_digest,
4280 .export = ahash_export,
4281 .import = ahash_import,
4282 .setkey = ahash_setkey,
4283 .halg = {
4284 .digestsize = SHA256_DIGEST_SIZE,
4285 .statesize = sizeof(struct caam_export_state),
4288 .alg_type = OP_ALG_ALGSEL_SHA256,
4289 }, {
4290 .name = "sha384",
4291 .driver_name = "sha384-caam-qi2",
4292 .hmac_name = "hmac(sha384)",
4293 .hmac_driver_name = "hmac-sha384-caam-qi2",
4294 .blocksize = SHA384_BLOCK_SIZE,
4295 .template_ahash = {
4296 .init = ahash_init,
4297 .update = ahash_update,
4298 .final = ahash_final,
4299 .finup = ahash_finup,
4300 .digest = ahash_digest,
4301 .export = ahash_export,
4302 .import = ahash_import,
4303 .setkey = ahash_setkey,
4304 .halg = {
4305 .digestsize = SHA384_DIGEST_SIZE,
4306 .statesize = sizeof(struct caam_export_state),
4309 .alg_type = OP_ALG_ALGSEL_SHA384,
4310 }, {
4311 .name = "sha512",
4312 .driver_name = "sha512-caam-qi2",
4313 .hmac_name = "hmac(sha512)",
4314 .hmac_driver_name = "hmac-sha512-caam-qi2",
4315 .blocksize = SHA512_BLOCK_SIZE,
4316 .template_ahash = {
4317 .init = ahash_init,
4318 .update = ahash_update,
4319 .final = ahash_final,
4320 .finup = ahash_finup,
4321 .digest = ahash_digest,
4322 .export = ahash_export,
4323 .import = ahash_import,
4324 .setkey = ahash_setkey,
4325 .halg = {
4326 .digestsize = SHA512_DIGEST_SIZE,
4327 .statesize = sizeof(struct caam_export_state),
4330 .alg_type = OP_ALG_ALGSEL_SHA512,
4331 }, {
4332 .name = "md5",
4333 .driver_name = "md5-caam-qi2",
4334 .hmac_name = "hmac(md5)",
4335 .hmac_driver_name = "hmac-md5-caam-qi2",
4336 .blocksize = MD5_BLOCK_WORDS * 4,
4337 .template_ahash = {
4338 .init = ahash_init,
4339 .update = ahash_update,
4340 .final = ahash_final,
4341 .finup = ahash_finup,
4342 .digest = ahash_digest,
4343 .export = ahash_export,
4344 .import = ahash_import,
4345 .setkey = ahash_setkey,
4346 .halg = {
4347 .digestsize = MD5_DIGEST_SIZE,
4348 .statesize = sizeof(struct caam_export_state),
4351 .alg_type = OP_ALG_ALGSEL_MD5,
4355 struct caam_hash_alg {
4356 struct list_head entry;
4357 struct device *dev;
4358 int alg_type;
4359 struct ahash_alg ahash_alg;
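/*
 * caam_hash_cra_init() - per-tfm setup: map the flow context array once
 * (bidirectionally, CPU sync skipped), derive the per-operation DMA
 * addresses, pick the running-digest length for the selected algorithm and
 * build the shared descriptors.
 */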
4362 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4364 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4365 struct crypto_alg *base = tfm->__crt_alg;
4366 struct hash_alg_common *halg =
4367 container_of(base, struct hash_alg_common, base);
4368 struct ahash_alg *alg =
4369 container_of(halg, struct ahash_alg, halg);
4370 struct caam_hash_alg *caam_hash =
4371 container_of(alg, struct caam_hash_alg, ahash_alg);
4372 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4373 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4374 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4375 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4376 HASH_MSG_LEN + 32,
4377 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4378 HASH_MSG_LEN + 64,
4379 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4380 dma_addr_t dma_addr;
4381 int i;
4383 ctx->dev = caam_hash->dev;
4385 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4386 DMA_BIDIRECTIONAL,
4387 DMA_ATTR_SKIP_CPU_SYNC);
4388 if (dma_mapping_error(ctx->dev, dma_addr)) {
4389 dev_err(ctx->dev, "unable to map shared descriptors\n");
4390 return -ENOMEM;
4393 for (i = 0; i < HASH_NUM_OP; i++)
4394 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4396 /* copy descriptor header template value */
4397 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4399 ctx->ctx_len = runninglen[(ctx->adata.algtype &
4400 OP_ALG_ALGSEL_SUBMASK) >>
4401 OP_ALG_ALGSEL_SHIFT];
4403 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4404 sizeof(struct caam_hash_state));
4406 return ahash_set_sh_desc(ahash);
4409 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4411 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4413 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4414 DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
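/*
 * caam_hash_alloc() - build a caam_hash_alg from a template, either as the
 * keyed hmac(...) variant or as the unkeyed digest (setkey cleared).
 */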
4417 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4418 struct caam_hash_template *template, bool keyed)
4420 struct caam_hash_alg *t_alg;
4421 struct ahash_alg *halg;
4422 struct crypto_alg *alg;
4424 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4425 if (!t_alg)
4426 return ERR_PTR(-ENOMEM);
4428 t_alg->ahash_alg = template->template_ahash;
4429 halg = &t_alg->ahash_alg;
4430 alg = &halg->halg.base;
4432 if (keyed) {
4433 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4434 template->hmac_name);
4435 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4436 template->hmac_driver_name);
4437 } else {
4438 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4439 template->name);
4440 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4441 template->driver_name);
4442 t_alg->ahash_alg.setkey = NULL;
4444 alg->cra_module = THIS_MODULE;
4445 alg->cra_init = caam_hash_cra_init;
4446 alg->cra_exit = caam_hash_cra_exit;
4447 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4448 alg->cra_priority = CAAM_CRA_PRIORITY;
4449 alg->cra_blocksize = template->blocksize;
4450 alg->cra_alignmask = 0;
4451 alg->cra_flags = CRYPTO_ALG_ASYNC;
4453 t_alg->alg_type = template->alg_type;
4454 t_alg->dev = dev;
4456 return t_alg;
4459 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4461 struct dpaa2_caam_priv_per_cpu *ppriv;
4463 ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4464 napi_schedule_irqoff(&ppriv->napi);
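/*
 * dpaa2_dpseci_dpio_setup() - for each online CPU (up to the number of
 * queue pairs), select an affine DPIO service, register the FQDAN
 * notification callback and allocate a dequeue store; defer probing if no
 * DPIO is available yet.
 */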
4467 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4469 struct device *dev = priv->dev;
4470 struct dpaa2_io_notification_ctx *nctx;
4471 struct dpaa2_caam_priv_per_cpu *ppriv;
4472 int err, i = 0, cpu;
4474 for_each_online_cpu(cpu) {
4475 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4476 ppriv->priv = priv;
4477 nctx = &ppriv->nctx;
4478 nctx->is_cdan = 0;
4479 nctx->id = ppriv->rsp_fqid;
4480 nctx->desired_cpu = cpu;
4481 nctx->cb = dpaa2_caam_fqdan_cb;
4483 /* Register notification callbacks */
4484 ppriv->dpio = dpaa2_io_service_select(cpu);
4485 err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4486 if (unlikely(err)) {
4487 dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4488 nctx->cb = NULL;
4489 /*
4490 * If no affine DPIO for this core, there's probably
4491 * none available for next cores either. Signal we want
4492 * to retry later, in case the DPIO devices weren't
4493 * probed yet.
4494 */
4495 err = -EPROBE_DEFER;
4496 goto err;
4499 ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4500 dev);
4501 if (unlikely(!ppriv->store)) {
4502 dev_err(dev, "dpaa2_io_store_create() failed\n");
4503 err = -ENOMEM;
4504 goto err;
4507 if (++i == priv->num_pairs)
4508 break;
4511 return 0;
4513 err:
4514 for_each_online_cpu(cpu) {
4515 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4516 if (!ppriv->nctx.cb)
4517 break;
4518 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4521 for_each_online_cpu(cpu) {
4522 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4523 if (!ppriv->store)
4524 break;
4525 dpaa2_io_store_destroy(ppriv->store);
4528 return err;
4531 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4533 struct dpaa2_caam_priv_per_cpu *ppriv;
4534 int i = 0, cpu;
4536 for_each_online_cpu(cpu) {
4537 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4538 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4539 priv->dev);
4540 dpaa2_io_store_destroy(ppriv->store);
4542 if (++i == priv->num_pairs)
4543 return;
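/*
 * dpaa2_dpseci_bind() - point each Rx (response) frame queue at the DPIO
 * that services it and store the notification context as the queue's
 * user context.
 */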
4547 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4549 struct dpseci_rx_queue_cfg rx_queue_cfg;
4550 struct device *dev = priv->dev;
4551 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4552 struct dpaa2_caam_priv_per_cpu *ppriv;
4553 int err = 0, i = 0, cpu;
4555 /* Configure Rx queues */
4556 for_each_online_cpu(cpu) {
4557 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4559 rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4560 DPSECI_QUEUE_OPT_USER_CTX;
4561 rx_queue_cfg.order_preservation_en = 0;
4562 rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4563 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4564 /*
4565 * Rx priority (WQ) doesn't really matter, since we use
4566 * pull mode, i.e. volatile dequeues from specific FQs
4567 */
4568 rx_queue_cfg.dest_cfg.priority = 0;
4569 rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4571 err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4572 &rx_queue_cfg);
4573 if (err) {
4574 dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4575 err);
4576 return err;
4579 if (++i == priv->num_pairs)
4580 break;
4583 return err;
4586 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4588 struct device *dev = priv->dev;
4590 if (!priv->cscn_mem)
4591 return;
4593 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4594 kfree(priv->cscn_mem);
4597 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4599 struct device *dev = priv->dev;
4600 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4602 dpaa2_dpseci_congestion_free(priv);
4603 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
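/*
 * dpaa2_caam_process_fd() - completion path: translate FD[ADDR] back to the
 * originating caam_request, unmap its frame list and invoke the request
 * callback with FD[FRC] as the status.
 */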
4606 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4607 const struct dpaa2_fd *fd)
4609 struct caam_request *req;
4610 u32 fd_err;
4612 if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4613 dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4614 return;
4617 fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4618 if (unlikely(fd_err))
4619 dev_err(priv->dev, "FD error: %08x\n", fd_err);
4621 /*
4622 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4623 * in FD[ERR] or FD[FRC].
4624 */
4625 req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4626 dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4627 DMA_BIDIRECTIONAL);
4628 req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4631 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4633 int err;
4635 /* Retry while portal is busy */
4636 do {
4637 err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4638 ppriv->store);
4639 } while (err == -EBUSY);
4641 if (unlikely(err))
4642 dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
4644 return err;
4647 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4649 struct dpaa2_dq *dq;
4650 int cleaned = 0, is_last;
4652 do {
4653 dq = dpaa2_io_store_next(ppriv->store, &is_last);
4654 if (unlikely(!dq)) {
4655 if (unlikely(!is_last)) {
4656 dev_dbg(ppriv->priv->dev,
4657 "FQ %d returned no valid frames\n",
4658 ppriv->rsp_fqid);
4659 /*
4660 * MUST retry until we get some sort of
4661 * valid response token (be it "empty dequeue"
4662 * or a valid frame).
4663 */
4664 continue;
4666 break;
4669 /* Process FD */
4670 dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4671 cleaned++;
4672 } while (!is_last);
4674 return cleaned;
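/*
 * dpaa2_dpseci_poll() - NAPI poll: volatile-dequeue from the response FQ
 * into the store and consume frames until the store runs dry or the budget
 * is nearly exhausted, then complete NAPI and re-arm notifications.
 */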
4677 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4679 struct dpaa2_caam_priv_per_cpu *ppriv;
4680 struct dpaa2_caam_priv *priv;
4681 int err, cleaned = 0, store_cleaned;
4683 ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4684 priv = ppriv->priv;
4686 if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4687 return 0;
4689 do {
4690 store_cleaned = dpaa2_caam_store_consume(ppriv);
4691 cleaned += store_cleaned;
4693 if (store_cleaned == 0 ||
4694 cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4695 break;
4697 /* Try to dequeue some more */
4698 err = dpaa2_caam_pull_fq(ppriv);
4699 if (unlikely(err))
4700 break;
4701 } while (1);
4703 if (cleaned < budget) {
4704 napi_complete_done(napi, cleaned);
4705 err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4706 if (unlikely(err))
4707 dev_err(priv->dev, "Notification rearm failed: %d\n",
4708 err);
4711 return cleaned;
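/*
 * dpaa2_dpseci_congestion_setup() - optional congestion state change
 * notification (CSCN) setup; the enqueue path later drops requests with
 * -EBUSY while the congestion group is congested.
 */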
4714 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4715 u16 token)
4717 struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4718 struct device *dev = priv->dev;
4719 int err;
4721 /*
4722 * Congestion group feature supported starting with DPSECI API v5.1
4723 * and only when object has been created with this capability.
4724 */
4725 if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4726 !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4727 return 0;
4729 priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4730 GFP_KERNEL | GFP_DMA);
4731 if (!priv->cscn_mem)
4732 return -ENOMEM;
4734 priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4735 priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4736 DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4737 if (dma_mapping_error(dev, priv->cscn_dma)) {
4738 dev_err(dev, "Error mapping CSCN memory area\n");
4739 err = -ENOMEM;
4740 goto err_dma_map;
4743 cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4744 cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4745 cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4746 cong_notif_cfg.message_ctx = (uintptr_t)priv;
4747 cong_notif_cfg.message_iova = priv->cscn_dma;
4748 cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4749 DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4750 DPSECI_CGN_MODE_COHERENT_WRITE;
4752 err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4753 &cong_notif_cfg);
4754 if (err) {
4755 dev_err(dev, "dpseci_set_congestion_notification failed\n");
4756 goto err_set_cong;
4759 return 0;
4761 err_set_cong:
4762 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4763 err_dma_map:
4764 kfree(priv->cscn_mem);
4766 return err;
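/*
 * dpaa2_dpseci_setup() - open the DPSECI object, read its API version,
 * attributes and Rx/Tx queue configuration, then distribute the queue
 * pairs over the online CPUs and register a NAPI instance per pair.
 */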
4769 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4771 struct device *dev = &ls_dev->dev;
4772 struct dpaa2_caam_priv *priv;
4773 struct dpaa2_caam_priv_per_cpu *ppriv;
4774 int err, cpu;
4775 u8 i;
4777 priv = dev_get_drvdata(dev);
4779 priv->dev = dev;
4780 priv->dpsec_id = ls_dev->obj_desc.id;
4782 /* Get a handle for the DPSECI this interface is associate with */
4783 err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4784 if (err) {
4785 dev_err(dev, "dpseci_open() failed: %d\n", err);
4786 goto err_open;
4789 err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4790 &priv->minor_ver);
4791 if (err) {
4792 dev_err(dev, "dpseci_get_api_version() failed\n");
4793 goto err_get_vers;
4796 dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
4798 err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
4799 &priv->dpseci_attr);
4800 if (err) {
4801 dev_err(dev, "dpseci_get_attributes() failed\n");
4802 goto err_get_vers;
4805 err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
4806 &priv->sec_attr);
4807 if (err) {
4808 dev_err(dev, "dpseci_get_sec_attr() failed\n");
4809 goto err_get_vers;
4812 err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
4813 if (err) {
4814 dev_err(dev, "setup_congestion() failed\n");
4815 goto err_get_vers;
4818 priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
4819 priv->dpseci_attr.num_tx_queues);
4820 if (priv->num_pairs > num_online_cpus()) {
4821 dev_warn(dev, "%d queues won't be used\n",
4822 priv->num_pairs - num_online_cpus());
4823 priv->num_pairs = num_online_cpus();
4826 for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
4827 err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4828 &priv->rx_queue_attr[i]);
4829 if (err) {
4830 dev_err(dev, "dpseci_get_rx_queue() failed\n");
4831 goto err_get_rx_queue;
4835 for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
4836 err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4837 &priv->tx_queue_attr[i]);
4838 if (err) {
4839 dev_err(dev, "dpseci_get_tx_queue() failed\n");
4840 goto err_get_rx_queue;
4844 i = 0;
4845 for_each_online_cpu(cpu) {
4846 u8 j;
4848 j = i % priv->num_pairs;
4850 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4851 ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
4853 /*
4854 * Allow all cores to enqueue, while only some of them
4855 * will take part in dequeuing.
4856 */
4857 if (++i > priv->num_pairs)
4858 continue;
4860 ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
4861 ppriv->prio = j;
4863 dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
4864 priv->rx_queue_attr[j].fqid,
4865 priv->tx_queue_attr[j].fqid);
4867 ppriv->net_dev.dev = *dev;
4868 INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
4869 netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
4870 DPAA2_CAAM_NAPI_WEIGHT);
4873 return 0;
4875 err_get_rx_queue:
4876 dpaa2_dpseci_congestion_free(priv);
4877 err_get_vers:
4878 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4879 err_open:
4880 return err;
4883 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
4885 struct device *dev = priv->dev;
4886 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4887 struct dpaa2_caam_priv_per_cpu *ppriv;
4888 int i;
4890 for (i = 0; i < priv->num_pairs; i++) {
4891 ppriv = per_cpu_ptr(priv->ppriv, i);
4892 napi_enable(&ppriv->napi);
4895 return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
4898 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
4900 struct device *dev = priv->dev;
4901 struct dpaa2_caam_priv_per_cpu *ppriv;
4902 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4903 int i, err = 0, enabled;
4905 err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
4906 if (err) {
4907 dev_err(dev, "dpseci_disable() failed\n");
4908 return err;
4911 err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
4912 if (err) {
4913 dev_err(dev, "dpseci_is_enabled() failed\n");
4914 return err;
4917 dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
4919 for (i = 0; i < priv->num_pairs; i++) {
4920 ppriv = per_cpu_ptr(priv->ppriv, i);
4921 napi_disable(&ppriv->napi);
4922 netif_napi_del(&ppriv->napi);
4925 return 0;
4928 static struct list_head hash_list;
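/*
 * dpaa2_caam_probe() - allocate the QI buffer cache and per-CPU state, set
 * up DPSECI, DPIO and their binding, then register every skcipher, AEAD and
 * ahash algorithm for which the SEC instance has the required accelerators.
 */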
4930 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
4932 struct device *dev;
4933 struct dpaa2_caam_priv *priv;
4934 int i, err = 0;
4935 bool registered = false;
4937 /*
4938 * There is no way to get CAAM endianness - there is no direct register
4939 * space access and MC f/w does not provide this attribute.
4940 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
4941 * property.
4942 */
4943 caam_little_end = true;
4945 caam_imx = false;
4947 dev = &dpseci_dev->dev;
4949 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
4950 if (!priv)
4951 return -ENOMEM;
4953 dev_set_drvdata(dev, priv);
4955 priv->domain = iommu_get_domain_for_dev(dev);
4957 qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
4958 0, SLAB_CACHE_DMA, NULL);
4959 if (!qi_cache) {
4960 dev_err(dev, "Can't allocate SEC cache\n");
4961 return -ENOMEM;
4964 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
4965 if (err) {
4966 dev_err(dev, "dma_set_mask_and_coherent() failed\n");
4967 goto err_dma_mask;
4970 /* Obtain a MC portal */
4971 err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
4972 if (err) {
4973 if (err == -ENXIO)
4974 err = -EPROBE_DEFER;
4975 else
4976 dev_err(dev, "MC portal allocation failed\n");
4978 goto err_dma_mask;
4981 priv->ppriv = alloc_percpu(*priv->ppriv);
4982 if (!priv->ppriv) {
4983 dev_err(dev, "alloc_percpu() failed\n");
4984 err = -ENOMEM;
4985 goto err_alloc_ppriv;
4988 /* DPSECI initialization */
4989 err = dpaa2_dpseci_setup(dpseci_dev);
4990 if (err) {
4991 dev_err(dev, "dpaa2_dpseci_setup() failed\n");
4992 goto err_dpseci_setup;
4995 /* DPIO */
4996 err = dpaa2_dpseci_dpio_setup(priv);
4997 if (err) {
4998 if (err != -EPROBE_DEFER)
4999 dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
5000 goto err_dpio_setup;
5003 /* DPSECI binding to DPIO */
5004 err = dpaa2_dpseci_bind(priv);
5005 if (err) {
5006 dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5007 goto err_bind;
5010 /* DPSECI enable */
5011 err = dpaa2_dpseci_enable(priv);
5012 if (err) {
5013 dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5014 goto err_bind;
5017 /* register crypto algorithms the device supports */
5018 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5019 struct caam_skcipher_alg *t_alg = driver_algs + i;
5020 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5022 /* Skip DES algorithms if not supported by device */
5023 if (!priv->sec_attr.des_acc_num &&
5024 (alg_sel == OP_ALG_ALGSEL_3DES ||
5025 alg_sel == OP_ALG_ALGSEL_DES))
5026 continue;
5028 /* Skip AES algorithms if not supported by device */
5029 if (!priv->sec_attr.aes_acc_num &&
5030 alg_sel == OP_ALG_ALGSEL_AES)
5031 continue;
5033 /* Skip CHACHA20 algorithms if not supported by device */
5034 if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5035 !priv->sec_attr.ccha_acc_num)
5036 continue;
5038 t_alg->caam.dev = dev;
5039 caam_skcipher_alg_init(t_alg);
5041 err = crypto_register_skcipher(&t_alg->skcipher);
5042 if (err) {
5043 dev_warn(dev, "%s alg registration failed: %d\n",
5044 t_alg->skcipher.base.cra_driver_name, err);
5045 continue;
5048 t_alg->registered = true;
5049 registered = true;
5052 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5053 struct caam_aead_alg *t_alg = driver_aeads + i;
5054 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5055 OP_ALG_ALGSEL_MASK;
5056 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5057 OP_ALG_ALGSEL_MASK;
5059 /* Skip DES algorithms if not supported by device */
5060 if (!priv->sec_attr.des_acc_num &&
5061 (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5062 c1_alg_sel == OP_ALG_ALGSEL_DES))
5063 continue;
5065 /* Skip AES algorithms if not supported by device */
5066 if (!priv->sec_attr.aes_acc_num &&
5067 c1_alg_sel == OP_ALG_ALGSEL_AES)
5068 continue;
5070 /* Skip CHACHA20 algorithms if not supported by device */
5071 if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5072 !priv->sec_attr.ccha_acc_num)
5073 continue;
5075 /* Skip POLY1305 algorithms if not supported by device */
5076 if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5077 !priv->sec_attr.ptha_acc_num)
5078 continue;
5080 /*
5081 * Skip algorithms requiring message digests
5082 * if MD not supported by device.
5083 */
5084 if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5085 !priv->sec_attr.md_acc_num)
5086 continue;
5088 t_alg->caam.dev = dev;
5089 caam_aead_alg_init(t_alg);
5091 err = crypto_register_aead(&t_alg->aead);
5092 if (err) {
5093 dev_warn(dev, "%s alg registration failed: %d\n",
5094 t_alg->aead.base.cra_driver_name, err);
5095 continue;
5098 t_alg->registered = true;
5099 registered = true;
5101 if (registered)
5102 dev_info(dev, "algorithms registered in /proc/crypto\n");
5104 /* register hash algorithms the device supports */
5105 INIT_LIST_HEAD(&hash_list);
5107 /*
5108 * Skip registration of any hashing algorithms if MD block
5109 * is not present.
5110 */
5111 if (!priv->sec_attr.md_acc_num)
5112 return 0;
5114 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5115 struct caam_hash_alg *t_alg;
5116 struct caam_hash_template *alg = driver_hash + i;
5118 /* register hmac version */
5119 t_alg = caam_hash_alloc(dev, alg, true);
5120 if (IS_ERR(t_alg)) {
5121 err = PTR_ERR(t_alg);
5122 dev_warn(dev, "%s hash alg allocation failed: %d\n",
5123 alg->driver_name, err);
5124 continue;
5127 err = crypto_register_ahash(&t_alg->ahash_alg);
5128 if (err) {
5129 dev_warn(dev, "%s alg registration failed: %d\n",
5130 t_alg->ahash_alg.halg.base.cra_driver_name,
5131 err);
5132 kfree(t_alg);
5133 } else {
5134 list_add_tail(&t_alg->entry, &hash_list);
5137 /* register unkeyed version */
5138 t_alg = caam_hash_alloc(dev, alg, false);
5139 if (IS_ERR(t_alg)) {
5140 err = PTR_ERR(t_alg);
5141 dev_warn(dev, "%s alg allocation failed: %d\n",
5142 alg->driver_name, err);
5143 continue;
5146 err = crypto_register_ahash(&t_alg->ahash_alg);
5147 if (err) {
5148 dev_warn(dev, "%s alg registration failed: %d\n",
5149 t_alg->ahash_alg.halg.base.cra_driver_name,
5150 err);
5151 kfree(t_alg);
5152 } else {
5153 list_add_tail(&t_alg->entry, &hash_list);
5156 if (!list_empty(&hash_list))
5157 dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5159 return err;
5161 err_bind:
5162 dpaa2_dpseci_dpio_free(priv);
5163 err_dpio_setup:
5164 dpaa2_dpseci_free(priv);
5165 err_dpseci_setup:
5166 free_percpu(priv->ppriv);
5167 err_alloc_ppriv:
5168 fsl_mc_portal_free(priv->mc_io);
5169 err_dma_mask:
5170 kmem_cache_destroy(qi_cache);
5172 return err;
5175 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5177 struct device *dev;
5178 struct dpaa2_caam_priv *priv;
5179 int i;
5181 dev = &ls_dev->dev;
5182 priv = dev_get_drvdata(dev);
5184 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5185 struct caam_aead_alg *t_alg = driver_aeads + i;
5187 if (t_alg->registered)
5188 crypto_unregister_aead(&t_alg->aead);
5191 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5192 struct caam_skcipher_alg *t_alg = driver_algs + i;
5194 if (t_alg->registered)
5195 crypto_unregister_skcipher(&t_alg->skcipher);
5198 if (hash_list.next) {
5199 struct caam_hash_alg *t_hash_alg, *p;
5201 list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5202 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5203 list_del(&t_hash_alg->entry);
5204 kfree(t_hash_alg);
5208 dpaa2_dpseci_disable(priv);
5209 dpaa2_dpseci_dpio_free(priv);
5210 dpaa2_dpseci_free(priv);
5211 free_percpu(priv->ppriv);
5212 fsl_mc_portal_free(priv->mc_io);
5213 kmem_cache_destroy(qi_cache);
5215 return 0;
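/*
 * dpaa2_caam_enqueue() - submit a request: bail out with -EBUSY while the
 * congestion group is congested, map the frame list, build a list-format FD
 * and retry the portal enqueue a bounded number of times before giving up.
 */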
5218 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5220 struct dpaa2_fd fd;
5221 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5222 struct dpaa2_caam_priv_per_cpu *ppriv;
5223 int err = 0, i;
5225 if (IS_ERR(req))
5226 return PTR_ERR(req);
5228 if (priv->cscn_mem) {
5229 dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5230 DPAA2_CSCN_SIZE,
5231 DMA_FROM_DEVICE);
5232 if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5233 dev_dbg_ratelimited(dev, "Dropping request\n");
5234 return -EBUSY;
5238 dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5240 req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5241 DMA_BIDIRECTIONAL);
5242 if (dma_mapping_error(dev, req->fd_flt_dma)) {
5243 dev_err(dev, "DMA mapping error for QI enqueue request\n");
5244 goto err_out;
5247 memset(&fd, 0, sizeof(fd));
5248 dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5249 dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5250 dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5251 dpaa2_fd_set_flc(&fd, req->flc_dma);
5253 ppriv = this_cpu_ptr(priv->ppriv);
5254 for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5255 err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5256 &fd);
5257 if (err != -EBUSY)
5258 break;
5260 cpu_relax();
5263 if (unlikely(err)) {
5264 dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5265 goto err_out;
5268 return -EINPROGRESS;
5270 err_out:
5271 dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5272 DMA_BIDIRECTIONAL);
5273 return -EIO;
5275 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5277 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5279 .vendor = FSL_MC_VENDOR_FREESCALE,
5280 .obj_type = "dpseci",
5282 { .vendor = 0x0 }
5285 static struct fsl_mc_driver dpaa2_caam_driver = {
5286 .driver = {
5287 .name = KBUILD_MODNAME,
5288 .owner = THIS_MODULE,
5290 .probe = dpaa2_caam_probe,
5291 .remove = dpaa2_caam_remove,
5292 .match_id_table = dpaa2_caam_match_id_table
5295 MODULE_LICENSE("Dual BSD/GPL");
5296 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5297 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5299 module_fsl_mc_driver(dpaa2_caam_driver);