1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * caam - Freescale FSL CAAM support for ahash functions of crypto API
5 * Copyright 2011 Freescale Semiconductor, Inc.
6 * Copyright 2018-2019 NXP
8 * Based on caamalg.c crypto API driver.
10 * relationship of digest job descriptor or first job descriptor after init to
11 * shared descriptors:
13 * --------------- ---------------
14 * | JobDesc #1 |-------------------->| ShareDesc |
15 * | *(packet 1) | | (hashKey) |
16 * --------------- | (operation) |
17 * ---------------
19 * relationship of subsequent job descriptors to shared descriptors:
21 * --------------- ---------------
22 * | JobDesc #2 |-------------------->| ShareDesc |
23 * | *(packet 2) | |------------->| (hashKey) |
24 * --------------- | |-------->| (operation) |
25 * . | | | (load ctx2) |
26 * . | | ---------------
27 * --------------- | |
28 * | JobDesc #3 |------| |
29 * | *(packet 3) | |
30 * --------------- |
31 * . |
32 * . |
33 * --------------- |
34 * | JobDesc #4 |------------
35 * | *(packet 4) |
36 * ---------------
38 * The SharedDesc never changes for a connection unless rekeyed, but
39 * each packet will likely be in a different place. So all we need
40 * to know to process the packet is where the input is, where the
41 * output goes, and what context we want to process with. Context is
42 * in the SharedDesc, packet references in the JobDesc.
44 * So, a job desc looks like:
46 * ---------------------
47 * | Header |
48 * | ShareDesc Pointer |
49 * | SEQ_OUT_PTR |
50 * | (output buffer) |
51 * | (output length) |
52 * | SEQ_IN_PTR |
53 * | (input buffer) |
54 * | (input length) |
55 * ---------------------
58 #include "compat.h"
60 #include "regs.h"
61 #include "intern.h"
62 #include "desc_constr.h"
63 #include "jr.h"
64 #include "error.h"
65 #include "sg_sw_sec4.h"
66 #include "key_gen.h"
67 #include "caamhash_desc.h"
68 #include <crypto/engine.h>
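/*
 * Illustrative sketch (not part of the upstream driver): a minimal view of
 * how a job descriptor that points at a shared descriptor - the layout drawn
 * in the header comment above - could be assembled with the same
 * desc_constr.h helpers this file already uses. The function name and the
 * SEQ OUT / SEQ IN ordering (taken from the diagram) are assumptions for
 * illustration only.
 */
static inline void caamhash_jobdesc_sketch(u32 *desc, u32 *sh_desc,
					   dma_addr_t sh_desc_dma,
					   dma_addr_t src_dma, u32 src_len,
					   dma_addr_t dst_dma, u32 dst_len)
{
	/* Header plus pointer to the shared descriptor (hashKey, operation) */
	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	/* SEQ_OUT_PTR: output buffer and length */
	append_seq_out_ptr(desc, dst_dma, dst_len, 0);

	/* SEQ_IN_PTR: input buffer and length */
	append_seq_in_ptr(desc, src_dma, src_len, 0);
}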
70 #define CAAM_CRA_PRIORITY 3000
72 /* max hash key is max split key size */
73 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
75 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
76 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
78 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
79 CAAM_MAX_HASH_KEY_SIZE)
80 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
82 /* caam context sizes for hashes: running digest + 8 */
83 #define HASH_MSG_LEN 8
84 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
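/*
 * Worked example (informational): for SHA-256 the running context held in
 * caam_ctx[] is HASH_MSG_LEN + SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes, i.e.
 * the intermediate digest plus the 8-byte running message length;
 * MAX_CTX_LEN simply covers the largest case, SHA-512: 8 + 64 = 72 bytes.
 */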
86 static struct list_head hash_list;
88 /* ahash per-session context */
89 struct caam_hash_ctx {
90 struct crypto_engine_ctx enginectx;
91 u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
92 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
93 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
94 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
95 u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
96 dma_addr_t sh_desc_update_dma ____cacheline_aligned;
97 dma_addr_t sh_desc_update_first_dma;
98 dma_addr_t sh_desc_fin_dma;
99 dma_addr_t sh_desc_digest_dma;
100 enum dma_data_direction dir;
101 enum dma_data_direction key_dir;
102 struct device *jrdev;
103 int ctx_len;
104 struct alginfo adata;
107 /* ahash state */
108 struct caam_hash_state {
109 dma_addr_t buf_dma;
110 dma_addr_t ctx_dma;
111 int ctx_dma_len;
112 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
113 int buflen;
114 int next_buflen;
115 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
116 int (*update)(struct ahash_request *req) ____cacheline_aligned;
117 int (*final)(struct ahash_request *req);
118 int (*finup)(struct ahash_request *req);
119 struct ahash_edesc *edesc;
120 void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
121 void *context);
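/*
 * Note (informational): the update/final/finup function pointers above let
 * the per-request state act as a small state machine. ahash_init() points
 * them at the *_first / *_no_ctx variants; once a first job descriptor has
 * produced a running context, ahash_update_first() and ahash_update_no_ctx()
 * switch them to the *_ctx variants, so subsequent calls automatically use
 * the context-carrying descriptors.
 */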
124 struct caam_export_state {
125 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
126 u8 caam_ctx[MAX_CTX_LEN];
127 int buflen;
128 int (*update)(struct ahash_request *req);
129 int (*final)(struct ahash_request *req);
130 int (*finup)(struct ahash_request *req);
133 static inline bool is_cmac_aes(u32 algtype)
135 return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
136 (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
138 /* Common job descriptor seq in/out ptr routines */
140 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
141 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
142 struct caam_hash_state *state,
143 int ctx_len)
145 state->ctx_dma_len = ctx_len;
146 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
147 ctx_len, DMA_FROM_DEVICE);
148 if (dma_mapping_error(jrdev, state->ctx_dma)) {
149 dev_err(jrdev, "unable to map ctx\n");
150 state->ctx_dma = 0;
151 return -ENOMEM;
154 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
156 return 0;
159 /* Map current buffer in state (if length > 0) and put it in link table */
160 static inline int buf_map_to_sec4_sg(struct device *jrdev,
161 struct sec4_sg_entry *sec4_sg,
162 struct caam_hash_state *state)
164 int buflen = state->buflen;
166 if (!buflen)
167 return 0;
169 state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
170 DMA_TO_DEVICE);
171 if (dma_mapping_error(jrdev, state->buf_dma)) {
172 dev_err(jrdev, "unable to map buf\n");
173 state->buf_dma = 0;
174 return -ENOMEM;
177 dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
179 return 0;
182 /* Map state->caam_ctx, and add it to link table */
183 static inline int ctx_map_to_sec4_sg(struct device *jrdev,
184 struct caam_hash_state *state, int ctx_len,
185 struct sec4_sg_entry *sec4_sg, u32 flag)
187 state->ctx_dma_len = ctx_len;
188 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
189 if (dma_mapping_error(jrdev, state->ctx_dma)) {
190 dev_err(jrdev, "unable to map ctx\n");
191 state->ctx_dma = 0;
192 return -ENOMEM;
195 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
197 return 0;
200 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
202 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
203 int digestsize = crypto_ahash_digestsize(ahash);
204 struct device *jrdev = ctx->jrdev;
205 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
206 u32 *desc;
208 ctx->adata.key_virt = ctx->key;
210 /* ahash_update shared descriptor */
211 desc = ctx->sh_desc_update;
212 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
213 ctx->ctx_len, true, ctrlpriv->era);
214 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
215 desc_bytes(desc), ctx->dir);
217 print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
218 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
219 1);
221 /* ahash_update_first shared descriptor */
222 desc = ctx->sh_desc_update_first;
223 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
224 ctx->ctx_len, false, ctrlpriv->era);
225 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
226 desc_bytes(desc), ctx->dir);
227 print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
228 ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
229 desc_bytes(desc), 1);
231 /* ahash_final shared descriptor */
232 desc = ctx->sh_desc_fin;
233 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
234 ctx->ctx_len, true, ctrlpriv->era);
235 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
236 desc_bytes(desc), ctx->dir);
238 print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
239 DUMP_PREFIX_ADDRESS, 16, 4, desc,
240 desc_bytes(desc), 1);
242 /* ahash_digest shared descriptor */
243 desc = ctx->sh_desc_digest;
244 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
245 ctx->ctx_len, false, ctrlpriv->era);
246 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
247 desc_bytes(desc), ctx->dir);
249 print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
250 DUMP_PREFIX_ADDRESS, 16, 4, desc,
251 desc_bytes(desc), 1);
253 return 0;
256 static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
258 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
259 int digestsize = crypto_ahash_digestsize(ahash);
260 struct device *jrdev = ctx->jrdev;
261 u32 *desc;
263 /* shared descriptor for ahash_update */
264 desc = ctx->sh_desc_update;
265 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
266 ctx->ctx_len, ctx->ctx_len);
267 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
268 desc_bytes(desc), ctx->dir);
269 print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
270 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
271 1);
273 /* shared descriptor for ahash_{final,finup} */
274 desc = ctx->sh_desc_fin;
275 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
276 digestsize, ctx->ctx_len);
277 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
278 desc_bytes(desc), ctx->dir);
279 print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
280 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
281 1);
283 /* key is immediate data for INIT and INITFINAL states */
284 ctx->adata.key_virt = ctx->key;
286 /* shared descriptor for first invocation of ahash_update */
287 desc = ctx->sh_desc_update_first;
288 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
289 ctx->ctx_len);
290 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
291 desc_bytes(desc), ctx->dir);
292 print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
293 " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
294 desc_bytes(desc), 1);
296 /* shared descriptor for ahash_digest */
297 desc = ctx->sh_desc_digest;
298 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
299 digestsize, ctx->ctx_len);
300 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
301 desc_bytes(desc), ctx->dir);
302 print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
303 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
304 1);
305 return 0;
308 static int acmac_set_sh_desc(struct crypto_ahash *ahash)
310 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
311 int digestsize = crypto_ahash_digestsize(ahash);
312 struct device *jrdev = ctx->jrdev;
313 u32 *desc;
315 /* shared descriptor for ahash_update */
316 desc = ctx->sh_desc_update;
317 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
318 ctx->ctx_len, ctx->ctx_len);
319 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
320 desc_bytes(desc), ctx->dir);
321 print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
322 DUMP_PREFIX_ADDRESS, 16, 4, desc,
323 desc_bytes(desc), 1);
325 /* shared descriptor for ahash_{final,finup} */
326 desc = ctx->sh_desc_fin;
327 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
328 digestsize, ctx->ctx_len);
329 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
330 desc_bytes(desc), ctx->dir);
331 print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
332 DUMP_PREFIX_ADDRESS, 16, 4, desc,
333 desc_bytes(desc), 1);
335 /* shared descriptor for first invocation of ahash_update */
336 desc = ctx->sh_desc_update_first;
337 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
338 ctx->ctx_len);
339 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
340 desc_bytes(desc), ctx->dir);
341 print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
342 " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
343 desc_bytes(desc), 1);
345 /* shared descriptor for ahash_digest */
346 desc = ctx->sh_desc_digest;
347 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
348 digestsize, ctx->ctx_len);
349 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
350 desc_bytes(desc), ctx->dir);
351 print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
352 DUMP_PREFIX_ADDRESS, 16, 4, desc,
353 desc_bytes(desc), 1);
355 return 0;
358 /* Digest the hash key if it is too large (longer than the block size) */
359 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
360 u32 digestsize)
362 struct device *jrdev = ctx->jrdev;
363 u32 *desc;
364 struct split_key_result result;
365 dma_addr_t key_dma;
366 int ret;
368 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
369 if (!desc) {
370 dev_err(jrdev, "unable to allocate key input memory\n");
371 return -ENOMEM;
374 init_job_desc(desc, 0);
376 key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
377 if (dma_mapping_error(jrdev, key_dma)) {
378 dev_err(jrdev, "unable to map key memory\n");
379 kfree(desc);
380 return -ENOMEM;
383 /* Job descriptor to perform unkeyed hash on key_in */
384 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
385 OP_ALG_AS_INITFINAL);
386 append_seq_in_ptr(desc, key_dma, *keylen, 0);
387 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
388 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
389 append_seq_out_ptr(desc, key_dma, digestsize, 0);
390 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
391 LDST_SRCDST_BYTE_CONTEXT);
393 print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
394 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
395 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
396 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
397 1);
399 result.err = 0;
400 init_completion(&result.completion);
402 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
403 if (ret == -EINPROGRESS) {
404 /* in progress */
405 wait_for_completion(&result.completion);
406 ret = result.err;
408 print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
409 DUMP_PREFIX_ADDRESS, 16, 4, key,
410 digestsize, 1);
412 dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
414 *keylen = digestsize;
416 kfree(desc);
418 return ret;
421 static int ahash_setkey(struct crypto_ahash *ahash,
422 const u8 *key, unsigned int keylen)
424 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
425 struct device *jrdev = ctx->jrdev;
426 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
427 int digestsize = crypto_ahash_digestsize(ahash);
428 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
429 int ret;
430 u8 *hashed_key = NULL;
432 dev_dbg(jrdev, "keylen %d\n", keylen);
434 if (keylen > blocksize) {
435 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
436 if (!hashed_key)
437 return -ENOMEM;
438 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
439 if (ret)
440 goto bad_free_key;
441 key = hashed_key;
445 * If DKP is supported, use it in the shared descriptor to generate
446 * the split key.
448 if (ctrlpriv->era >= 6) {
449 ctx->adata.key_inline = true;
450 ctx->adata.keylen = keylen;
451 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
452 OP_ALG_ALGSEL_MASK);
454 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
455 goto bad_free_key;
457 memcpy(ctx->key, key, keylen);
460 * In case |user key| > |derived key|, using DKP<imm,imm>
461 * would result in invalid opcodes (last bytes of user key) in
462 * the resulting descriptor. Use DKP<ptr,imm> instead => both
463 * virtual and dma key addresses are needed.
465 if (keylen > ctx->adata.keylen_pad)
466 dma_sync_single_for_device(ctx->jrdev,
467 ctx->adata.key_dma,
468 ctx->adata.keylen_pad,
469 DMA_TO_DEVICE);
470 } else {
471 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
472 keylen, CAAM_MAX_HASH_KEY_SIZE);
473 if (ret)
474 goto bad_free_key;
477 kfree(hashed_key);
478 return ahash_set_sh_desc(ahash);
479 bad_free_key:
480 kfree(hashed_key);
481 return -EINVAL;
484 static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
485 unsigned int keylen)
487 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
488 struct device *jrdev = ctx->jrdev;
490 if (keylen != AES_KEYSIZE_128)
491 return -EINVAL;
493 memcpy(ctx->key, key, keylen);
494 dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
495 DMA_TO_DEVICE);
496 ctx->adata.keylen = keylen;
498 print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
499 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
501 return axcbc_set_sh_desc(ahash);
504 static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
505 unsigned int keylen)
507 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
508 int err;
510 err = aes_check_keylen(keylen);
511 if (err)
512 return err;
514 /* key is immediate data for all cmac shared descriptors */
515 ctx->adata.key_virt = key;
516 ctx->adata.keylen = keylen;
518 print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
519 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
521 return acmac_set_sh_desc(ahash);
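/*
 * Note (informational): the two AES MAC setkey paths are intentionally
 * different. axcbc_setkey() copies the 16-byte key into ctx->key and syncs
 * it to its DMA mapping (set up bidirectionally in caam_hash_cra_init(),
 * allowing the CAAM to write derived key material back), whereas
 * acmac_setkey() only records the key as immediate data that gets inlined
 * into every cmac shared descriptor.
 */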
525 * ahash_edesc - s/w-extended ahash descriptor
526 * @sec4_sg_dma: physical mapped address of h/w link table
527 * @src_nents: number of segments in input scatterlist
528 * @sec4_sg_bytes: length of dma mapped sec4_sg space
529 * @bklog: stored to determine if the request needs backlog
530 * @hw_desc: the h/w job descriptor followed by any referenced link tables
531 * @sec4_sg: h/w link table
533 struct ahash_edesc {
534 dma_addr_t sec4_sg_dma;
535 int src_nents;
536 int sec4_sg_bytes;
537 bool bklog;
538 u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
539 struct sec4_sg_entry sec4_sg[];
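/*
 * Note (informational): sec4_sg[] is a flexible array member, so
 * ahash_edesc_alloc() below sizes the whole allocation as
 * sizeof(struct ahash_edesc) + sg_num * sizeof(struct sec4_sg_entry),
 * keeping the job descriptor and its link table in a single buffer.
 */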
542 static inline void ahash_unmap(struct device *dev,
543 struct ahash_edesc *edesc,
544 struct ahash_request *req, int dst_len)
546 struct caam_hash_state *state = ahash_request_ctx(req);
548 if (edesc->src_nents)
549 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
551 if (edesc->sec4_sg_bytes)
552 dma_unmap_single(dev, edesc->sec4_sg_dma,
553 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
555 if (state->buf_dma) {
556 dma_unmap_single(dev, state->buf_dma, state->buflen,
557 DMA_TO_DEVICE);
558 state->buf_dma = 0;
562 static inline void ahash_unmap_ctx(struct device *dev,
563 struct ahash_edesc *edesc,
564 struct ahash_request *req, int dst_len, u32 flag)
566 struct caam_hash_state *state = ahash_request_ctx(req);
568 if (state->ctx_dma) {
569 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
570 state->ctx_dma = 0;
572 ahash_unmap(dev, edesc, req, dst_len);
575 static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
576 void *context, enum dma_data_direction dir)
578 struct ahash_request *req = context;
579 struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
580 struct ahash_edesc *edesc;
581 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
582 int digestsize = crypto_ahash_digestsize(ahash);
583 struct caam_hash_state *state = ahash_request_ctx(req);
584 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
585 int ecode = 0;
587 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
589 edesc = state->edesc;
591 if (err)
592 ecode = caam_jr_strstatus(jrdev, err);
594 ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
595 memcpy(req->result, state->caam_ctx, digestsize);
596 kfree(edesc);
598 print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
599 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
600 ctx->ctx_len, 1);
603 * If no backlog flag, the completion of the request is done
604 * by CAAM, not crypto engine.
606 if (!edesc->bklog)
607 req->base.complete(&req->base, ecode);
608 else
609 crypto_finalize_hash_request(jrp->engine, req, ecode);
612 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
613 void *context)
615 ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
618 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
619 void *context)
621 ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
624 static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
625 void *context, enum dma_data_direction dir)
627 struct ahash_request *req = context;
628 struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
629 struct ahash_edesc *edesc;
630 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
631 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
632 struct caam_hash_state *state = ahash_request_ctx(req);
633 int digestsize = crypto_ahash_digestsize(ahash);
634 int ecode = 0;
636 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
638 edesc = state->edesc;
639 if (err)
640 ecode = caam_jr_strstatus(jrdev, err);
642 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
643 kfree(edesc);
645 scatterwalk_map_and_copy(state->buf, req->src,
646 req->nbytes - state->next_buflen,
647 state->next_buflen, 0);
648 state->buflen = state->next_buflen;
650 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
651 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
652 state->buflen, 1);
654 print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
655 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
656 ctx->ctx_len, 1);
657 if (req->result)
658 print_hex_dump_debug("result@"__stringify(__LINE__)": ",
659 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
660 digestsize, 1);
663 * If no backlog flag, the completion of the request is done
664 * by CAAM, not crypto engine.
666 if (!edesc->bklog)
667 req->base.complete(&req->base, ecode);
668 else
669 crypto_finalize_hash_request(jrp->engine, req, ecode);
673 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
674 void *context)
676 ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
679 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
680 void *context)
682 ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
686 * Allocate an enhanced descriptor, which contains the hardware descriptor
687 * and space for hardware scatter table containing sg_num entries.
689 static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
690 int sg_num, u32 *sh_desc,
691 dma_addr_t sh_desc_dma)
693 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
694 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
695 struct caam_hash_state *state = ahash_request_ctx(req);
696 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
697 GFP_KERNEL : GFP_ATOMIC;
698 struct ahash_edesc *edesc;
699 unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
701 edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
702 if (!edesc) {
703 dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
704 return NULL;
707 state->edesc = edesc;
709 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
710 HDR_SHARE_DEFER | HDR_REVERSE);
712 return edesc;
715 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
716 struct ahash_edesc *edesc,
717 struct ahash_request *req, int nents,
718 unsigned int first_sg,
719 unsigned int first_bytes, size_t to_hash)
721 dma_addr_t src_dma;
722 u32 options;
724 if (nents > 1 || first_sg) {
725 struct sec4_sg_entry *sg = edesc->sec4_sg;
726 unsigned int sgsize = sizeof(*sg) *
727 pad_sg_nents(first_sg + nents);
729 sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
731 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
732 if (dma_mapping_error(ctx->jrdev, src_dma)) {
733 dev_err(ctx->jrdev, "unable to map S/G table\n");
734 return -ENOMEM;
737 edesc->sec4_sg_bytes = sgsize;
738 edesc->sec4_sg_dma = src_dma;
739 options = LDST_SGF;
740 } else {
741 src_dma = sg_dma_address(req->src);
742 options = 0;
745 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
746 options);
748 return 0;
751 static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
753 struct ahash_request *req = ahash_request_cast(areq);
754 struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
755 struct caam_hash_state *state = ahash_request_ctx(req);
756 struct device *jrdev = ctx->jrdev;
757 u32 *desc = state->edesc->hw_desc;
758 int ret;
760 state->edesc->bklog = true;
762 ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);
764 if (ret != -EINPROGRESS) {
765 ahash_unmap(jrdev, state->edesc, req, 0);
766 kfree(state->edesc);
767 } else {
768 ret = 0;
771 return ret;
774 static int ahash_enqueue_req(struct device *jrdev,
775 void (*cbk)(struct device *jrdev, u32 *desc,
776 u32 err, void *context),
777 struct ahash_request *req,
778 int dst_len, enum dma_data_direction dir)
780 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
781 struct caam_hash_state *state = ahash_request_ctx(req);
782 struct ahash_edesc *edesc = state->edesc;
783 u32 *desc = edesc->hw_desc;
784 int ret;
786 state->ahash_op_done = cbk;
789 * Only backlog requests are sent to crypto-engine since the others
790 * can be handled by CAAM, if free, especially since JR has up to 1024
791 * entries (more than the 10 entries from crypto-engine).
793 if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
794 ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
795 req);
796 else
797 ret = caam_jr_enqueue(jrdev, desc, cbk, req);
799 if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
800 ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
801 kfree(edesc);
804 return ret;
807 /* submit update job descriptor */
808 static int ahash_update_ctx(struct ahash_request *req)
810 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
811 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
812 struct caam_hash_state *state = ahash_request_ctx(req);
813 struct device *jrdev = ctx->jrdev;
814 u8 *buf = state->buf;
815 int *buflen = &state->buflen;
816 int *next_buflen = &state->next_buflen;
817 int blocksize = crypto_ahash_blocksize(ahash);
818 int in_len = *buflen + req->nbytes, to_hash;
819 u32 *desc;
820 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
821 struct ahash_edesc *edesc;
822 int ret = 0;
824 *next_buflen = in_len & (blocksize - 1);
825 to_hash = in_len - *next_buflen;
828 * For XCBC and CMAC, if to_hash is multiple of block size,
829 * keep last block in internal buffer
831 if ((is_xcbc_aes(ctx->adata.algtype) ||
832 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
833 (*next_buflen == 0)) {
834 *next_buflen = blocksize;
835 to_hash -= blocksize;
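/*
 * Worked example (informational): with a 16-byte block size, an empty
 * buffer and a 32-byte request, in_len = 32 so *next_buflen would normally
 * be 0; the branch above instead keeps the last block back (*next_buflen =
 * 16, to_hash = 16) so it is still available when ahash_final()/
 * ahash_finup() performs the special XCBC/CMAC last-block processing.
 */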
838 if (to_hash) {
839 int pad_nents;
840 int src_len = req->nbytes - *next_buflen;
842 src_nents = sg_nents_for_len(req->src, src_len);
843 if (src_nents < 0) {
844 dev_err(jrdev, "Invalid number of src SG.\n");
845 return src_nents;
848 if (src_nents) {
849 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
850 DMA_TO_DEVICE);
851 if (!mapped_nents) {
852 dev_err(jrdev, "unable to DMA map source\n");
853 return -ENOMEM;
855 } else {
856 mapped_nents = 0;
859 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
860 pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
861 sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
864 * allocate space for base edesc and hw desc commands,
865 * link tables
867 edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
868 ctx->sh_desc_update_dma);
869 if (!edesc) {
870 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
871 return -ENOMEM;
874 edesc->src_nents = src_nents;
875 edesc->sec4_sg_bytes = sec4_sg_bytes;
877 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
878 edesc->sec4_sg, DMA_BIDIRECTIONAL);
879 if (ret)
880 goto unmap_ctx;
882 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
883 if (ret)
884 goto unmap_ctx;
886 if (mapped_nents)
887 sg_to_sec4_sg_last(req->src, src_len,
888 edesc->sec4_sg + sec4_sg_src_index,
889 0);
890 else
891 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
892 1);
894 desc = edesc->hw_desc;
896 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
897 sec4_sg_bytes,
898 DMA_TO_DEVICE);
899 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
900 dev_err(jrdev, "unable to map S/G table\n");
901 ret = -ENOMEM;
902 goto unmap_ctx;
905 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
906 to_hash, LDST_SGF);
908 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
910 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
911 DUMP_PREFIX_ADDRESS, 16, 4, desc,
912 desc_bytes(desc), 1);
914 ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
915 ctx->ctx_len, DMA_BIDIRECTIONAL);
916 } else if (*next_buflen) {
917 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
918 req->nbytes, 0);
919 *buflen = *next_buflen;
921 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
922 DUMP_PREFIX_ADDRESS, 16, 4, buf,
923 *buflen, 1);
926 return ret;
927 unmap_ctx:
928 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
929 kfree(edesc);
930 return ret;
933 static int ahash_final_ctx(struct ahash_request *req)
935 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
936 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
937 struct caam_hash_state *state = ahash_request_ctx(req);
938 struct device *jrdev = ctx->jrdev;
939 int buflen = state->buflen;
940 u32 *desc;
941 int sec4_sg_bytes;
942 int digestsize = crypto_ahash_digestsize(ahash);
943 struct ahash_edesc *edesc;
944 int ret;
946 sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
947 sizeof(struct sec4_sg_entry);
949 /* allocate space for base edesc and hw desc commands, link tables */
950 edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
951 ctx->sh_desc_fin_dma);
952 if (!edesc)
953 return -ENOMEM;
955 desc = edesc->hw_desc;
957 edesc->sec4_sg_bytes = sec4_sg_bytes;
959 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
960 edesc->sec4_sg, DMA_BIDIRECTIONAL);
961 if (ret)
962 goto unmap_ctx;
964 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
965 if (ret)
966 goto unmap_ctx;
968 sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
970 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
971 sec4_sg_bytes, DMA_TO_DEVICE);
972 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
973 dev_err(jrdev, "unable to map S/G table\n");
974 ret = -ENOMEM;
975 goto unmap_ctx;
978 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
979 LDST_SGF);
980 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
982 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
983 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
984 1);
986 return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
987 digestsize, DMA_BIDIRECTIONAL);
988 unmap_ctx:
989 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
990 kfree(edesc);
991 return ret;
994 static int ahash_finup_ctx(struct ahash_request *req)
996 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
997 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
998 struct caam_hash_state *state = ahash_request_ctx(req);
999 struct device *jrdev = ctx->jrdev;
1000 int buflen = state->buflen;
1001 u32 *desc;
1002 int sec4_sg_src_index;
1003 int src_nents, mapped_nents;
1004 int digestsize = crypto_ahash_digestsize(ahash);
1005 struct ahash_edesc *edesc;
1006 int ret;
1008 src_nents = sg_nents_for_len(req->src, req->nbytes);
1009 if (src_nents < 0) {
1010 dev_err(jrdev, "Invalid number of src SG.\n");
1011 return src_nents;
1014 if (src_nents) {
1015 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1016 DMA_TO_DEVICE);
1017 if (!mapped_nents) {
1018 dev_err(jrdev, "unable to DMA map source\n");
1019 return -ENOMEM;
1021 } else {
1022 mapped_nents = 0;
1025 sec4_sg_src_index = 1 + (buflen ? 1 : 0);
1027 /* allocate space for base edesc and hw desc commands, link tables */
1028 edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
1029 ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
1030 if (!edesc) {
1031 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1032 return -ENOMEM;
1035 desc = edesc->hw_desc;
1037 edesc->src_nents = src_nents;
1039 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
1040 edesc->sec4_sg, DMA_BIDIRECTIONAL);
1041 if (ret)
1042 goto unmap_ctx;
1044 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
1045 if (ret)
1046 goto unmap_ctx;
1048 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
1049 sec4_sg_src_index, ctx->ctx_len + buflen,
1050 req->nbytes);
1051 if (ret)
1052 goto unmap_ctx;
1054 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
1056 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1057 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1058 1);
1060 return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
1061 digestsize, DMA_BIDIRECTIONAL);
1062 unmap_ctx:
1063 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
1064 kfree(edesc);
1065 return ret;
1068 static int ahash_digest(struct ahash_request *req)
1070 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1071 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1072 struct caam_hash_state *state = ahash_request_ctx(req);
1073 struct device *jrdev = ctx->jrdev;
1074 u32 *desc;
1075 int digestsize = crypto_ahash_digestsize(ahash);
1076 int src_nents, mapped_nents;
1077 struct ahash_edesc *edesc;
1078 int ret;
1080 state->buf_dma = 0;
1082 src_nents = sg_nents_for_len(req->src, req->nbytes);
1083 if (src_nents < 0) {
1084 dev_err(jrdev, "Invalid number of src SG.\n");
1085 return src_nents;
1088 if (src_nents) {
1089 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1090 DMA_TO_DEVICE);
1091 if (!mapped_nents) {
1092 dev_err(jrdev, "unable to map source for DMA\n");
1093 return -ENOMEM;
1095 } else {
1096 mapped_nents = 0;
1099 /* allocate space for base edesc and hw desc commands, link tables */
1100 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
1101 ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
1102 if (!edesc) {
1103 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1104 return -ENOMEM;
1107 edesc->src_nents = src_nents;
1109 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1110 req->nbytes);
1111 if (ret) {
1112 ahash_unmap(jrdev, edesc, req, digestsize);
1113 kfree(edesc);
1114 return ret;
1117 desc = edesc->hw_desc;
1119 ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1120 if (ret) {
1121 ahash_unmap(jrdev, edesc, req, digestsize);
1122 kfree(edesc);
1123 return -ENOMEM;
1126 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1127 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1128 1);
1130 return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
1131 DMA_FROM_DEVICE);
1134 /* submit ahash final if it is the first job descriptor */
1135 static int ahash_final_no_ctx(struct ahash_request *req)
1137 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1138 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1139 struct caam_hash_state *state = ahash_request_ctx(req);
1140 struct device *jrdev = ctx->jrdev;
1141 u8 *buf = state->buf;
1142 int buflen = state->buflen;
1143 u32 *desc;
1144 int digestsize = crypto_ahash_digestsize(ahash);
1145 struct ahash_edesc *edesc;
1146 int ret;
1148 /* allocate space for base edesc and hw desc commands, link tables */
1149 edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
1150 ctx->sh_desc_digest_dma);
1151 if (!edesc)
1152 return -ENOMEM;
1154 desc = edesc->hw_desc;
1156 if (buflen) {
1157 state->buf_dma = dma_map_single(jrdev, buf, buflen,
1158 DMA_TO_DEVICE);
1159 if (dma_mapping_error(jrdev, state->buf_dma)) {
1160 dev_err(jrdev, "unable to map src\n");
1161 goto unmap;
1164 append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1167 ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1168 if (ret)
1169 goto unmap;
1171 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1172 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1173 1);
1175 return ahash_enqueue_req(jrdev, ahash_done, req,
1176 digestsize, DMA_FROM_DEVICE);
1177 unmap:
1178 ahash_unmap(jrdev, edesc, req, digestsize);
1179 kfree(edesc);
1180 return -ENOMEM;
1183 /* submit ahash update if it is the first job descriptor after update */
1184 static int ahash_update_no_ctx(struct ahash_request *req)
1186 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1187 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1188 struct caam_hash_state *state = ahash_request_ctx(req);
1189 struct device *jrdev = ctx->jrdev;
1190 u8 *buf = state->buf;
1191 int *buflen = &state->buflen;
1192 int *next_buflen = &state->next_buflen;
1193 int blocksize = crypto_ahash_blocksize(ahash);
1194 int in_len = *buflen + req->nbytes, to_hash;
1195 int sec4_sg_bytes, src_nents, mapped_nents;
1196 struct ahash_edesc *edesc;
1197 u32 *desc;
1198 int ret = 0;
1200 *next_buflen = in_len & (blocksize - 1);
1201 to_hash = in_len - *next_buflen;
1204 * For XCBC and CMAC, if to_hash is multiple of block size,
1205 * keep last block in internal buffer
1207 if ((is_xcbc_aes(ctx->adata.algtype) ||
1208 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1209 (*next_buflen == 0)) {
1210 *next_buflen = blocksize;
1211 to_hash -= blocksize;
1214 if (to_hash) {
1215 int pad_nents;
1216 int src_len = req->nbytes - *next_buflen;
1218 src_nents = sg_nents_for_len(req->src, src_len);
1219 if (src_nents < 0) {
1220 dev_err(jrdev, "Invalid number of src SG.\n");
1221 return src_nents;
1224 if (src_nents) {
1225 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1226 DMA_TO_DEVICE);
1227 if (!mapped_nents) {
1228 dev_err(jrdev, "unable to DMA map source\n");
1229 return -ENOMEM;
1231 } else {
1232 mapped_nents = 0;
1235 pad_nents = pad_sg_nents(1 + mapped_nents);
1236 sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
1239 * allocate space for base edesc and hw desc commands,
1240 * link tables
1242 edesc = ahash_edesc_alloc(req, pad_nents,
1243 ctx->sh_desc_update_first,
1244 ctx->sh_desc_update_first_dma);
1245 if (!edesc) {
1246 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1247 return -ENOMEM;
1250 edesc->src_nents = src_nents;
1251 edesc->sec4_sg_bytes = sec4_sg_bytes;
1253 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1254 if (ret)
1255 goto unmap_ctx;
1257 sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
1259 desc = edesc->hw_desc;
1261 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1262 sec4_sg_bytes,
1263 DMA_TO_DEVICE);
1264 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1265 dev_err(jrdev, "unable to map S/G table\n");
1266 ret = -ENOMEM;
1267 goto unmap_ctx;
1270 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1272 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1273 if (ret)
1274 goto unmap_ctx;
1276 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1277 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1278 desc_bytes(desc), 1);
1280 ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
1281 ctx->ctx_len, DMA_TO_DEVICE);
1282 if ((ret != -EINPROGRESS) && (ret != -EBUSY))
1283 return ret;
1284 state->update = ahash_update_ctx;
1285 state->finup = ahash_finup_ctx;
1286 state->final = ahash_final_ctx;
1287 } else if (*next_buflen) {
1288 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
1289 req->nbytes, 0);
1290 *buflen = *next_buflen;
1292 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
1293 DUMP_PREFIX_ADDRESS, 16, 4, buf,
1294 *buflen, 1);
1297 return ret;
1298 unmap_ctx:
1299 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1300 kfree(edesc);
1301 return ret;
1304 /* submit ahash finup if it is the first job descriptor after update */
1305 static int ahash_finup_no_ctx(struct ahash_request *req)
1307 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1308 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1309 struct caam_hash_state *state = ahash_request_ctx(req);
1310 struct device *jrdev = ctx->jrdev;
1311 int buflen = state->buflen;
1312 u32 *desc;
1313 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
1314 int digestsize = crypto_ahash_digestsize(ahash);
1315 struct ahash_edesc *edesc;
1316 int ret;
1318 src_nents = sg_nents_for_len(req->src, req->nbytes);
1319 if (src_nents < 0) {
1320 dev_err(jrdev, "Invalid number of src SG.\n");
1321 return src_nents;
1324 if (src_nents) {
1325 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1326 DMA_TO_DEVICE);
1327 if (!mapped_nents) {
1328 dev_err(jrdev, "unable to DMA map source\n");
1329 return -ENOMEM;
1331 } else {
1332 mapped_nents = 0;
1335 sec4_sg_src_index = 2;
1336 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1337 sizeof(struct sec4_sg_entry);
1339 /* allocate space for base edesc and hw desc commands, link tables */
1340 edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
1341 ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
1342 if (!edesc) {
1343 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1344 return -ENOMEM;
1347 desc = edesc->hw_desc;
1349 edesc->src_nents = src_nents;
1350 edesc->sec4_sg_bytes = sec4_sg_bytes;
1352 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1353 if (ret)
1354 goto unmap;
1356 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
1357 req->nbytes);
1358 if (ret) {
1359 dev_err(jrdev, "unable to map S/G table\n");
1360 goto unmap;
1363 ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1364 if (ret)
1365 goto unmap;
1367 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1368 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1369 1);
1371 return ahash_enqueue_req(jrdev, ahash_done, req,
1372 digestsize, DMA_FROM_DEVICE);
1373 unmap:
1374 ahash_unmap(jrdev, edesc, req, digestsize);
1375 kfree(edesc);
1376 return -ENOMEM;
1380 /* submit first update job descriptor after init */
1381 static int ahash_update_first(struct ahash_request *req)
1383 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1384 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1385 struct caam_hash_state *state = ahash_request_ctx(req);
1386 struct device *jrdev = ctx->jrdev;
1387 u8 *buf = state->buf;
1388 int *buflen = &state->buflen;
1389 int *next_buflen = &state->next_buflen;
1390 int to_hash;
1391 int blocksize = crypto_ahash_blocksize(ahash);
1392 u32 *desc;
1393 int src_nents, mapped_nents;
1394 struct ahash_edesc *edesc;
1395 int ret = 0;
1397 *next_buflen = req->nbytes & (blocksize - 1);
1398 to_hash = req->nbytes - *next_buflen;
1401 * For XCBC and CMAC, if to_hash is multiple of block size,
1402 * keep last block in internal buffer
1404 if ((is_xcbc_aes(ctx->adata.algtype) ||
1405 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1406 (*next_buflen == 0)) {
1407 *next_buflen = blocksize;
1408 to_hash -= blocksize;
1411 if (to_hash) {
1412 src_nents = sg_nents_for_len(req->src,
1413 req->nbytes - *next_buflen);
1414 if (src_nents < 0) {
1415 dev_err(jrdev, "Invalid number of src SG.\n");
1416 return src_nents;
1419 if (src_nents) {
1420 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1421 DMA_TO_DEVICE);
1422 if (!mapped_nents) {
1423 dev_err(jrdev, "unable to map source for DMA\n");
1424 return -ENOMEM;
1426 } else {
1427 mapped_nents = 0;
1431 * allocate space for base edesc and hw desc commands,
1432 * link tables
1434 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
1435 mapped_nents : 0,
1436 ctx->sh_desc_update_first,
1437 ctx->sh_desc_update_first_dma);
1438 if (!edesc) {
1439 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1440 return -ENOMEM;
1443 edesc->src_nents = src_nents;
1445 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1446 to_hash);
1447 if (ret)
1448 goto unmap_ctx;
1450 desc = edesc->hw_desc;
1452 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1453 if (ret)
1454 goto unmap_ctx;
1456 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1457 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1458 desc_bytes(desc), 1);
1460 ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
1461 ctx->ctx_len, DMA_TO_DEVICE);
1462 if ((ret != -EINPROGRESS) && (ret != -EBUSY))
1463 return ret;
1464 state->update = ahash_update_ctx;
1465 state->finup = ahash_finup_ctx;
1466 state->final = ahash_final_ctx;
1467 } else if (*next_buflen) {
1468 state->update = ahash_update_no_ctx;
1469 state->finup = ahash_finup_no_ctx;
1470 state->final = ahash_final_no_ctx;
1471 scatterwalk_map_and_copy(buf, req->src, 0,
1472 req->nbytes, 0);
1473 *buflen = *next_buflen;
1475 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
1476 DUMP_PREFIX_ADDRESS, 16, 4, buf,
1477 *buflen, 1);
1480 return ret;
1481 unmap_ctx:
1482 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1483 kfree(edesc);
1484 return ret;
1487 static int ahash_finup_first(struct ahash_request *req)
1489 return ahash_digest(req);
1492 static int ahash_init(struct ahash_request *req)
1494 struct caam_hash_state *state = ahash_request_ctx(req);
1496 state->update = ahash_update_first;
1497 state->finup = ahash_finup_first;
1498 state->final = ahash_final_no_ctx;
1500 state->ctx_dma = 0;
1501 state->ctx_dma_len = 0;
1502 state->buf_dma = 0;
1503 state->buflen = 0;
1504 state->next_buflen = 0;
1506 return 0;
1509 static int ahash_update(struct ahash_request *req)
1511 struct caam_hash_state *state = ahash_request_ctx(req);
1513 return state->update(req);
1516 static int ahash_finup(struct ahash_request *req)
1518 struct caam_hash_state *state = ahash_request_ctx(req);
1520 return state->finup(req);
1523 static int ahash_final(struct ahash_request *req)
1525 struct caam_hash_state *state = ahash_request_ctx(req);
1527 return state->final(req);
1530 static int ahash_export(struct ahash_request *req, void *out)
1532 struct caam_hash_state *state = ahash_request_ctx(req);
1533 struct caam_export_state *export = out;
1534 u8 *buf = state->buf;
1535 int len = state->buflen;
1537 memcpy(export->buf, buf, len);
1538 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1539 export->buflen = len;
1540 export->update = state->update;
1541 export->final = state->final;
1542 export->finup = state->finup;
1544 return 0;
1547 static int ahash_import(struct ahash_request *req, const void *in)
1549 struct caam_hash_state *state = ahash_request_ctx(req);
1550 const struct caam_export_state *export = in;
1552 memset(state, 0, sizeof(*state));
1553 memcpy(state->buf, export->buf, export->buflen);
1554 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1555 state->buflen = export->buflen;
1556 state->update = export->update;
1557 state->final = export->final;
1558 state->finup = export->finup;
1560 return 0;
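/*
 * Illustrative sketch (not part of the upstream driver): how a generic
 * ahash user could exercise the export/import pair above to checkpoint a
 * partial hash. 'req' and 'state_buf' are hypothetical and error handling
 * is omitted:
 *
 *	char state_buf[sizeof(struct caam_export_state)];
 *
 *	crypto_ahash_update(req);             // hash some data
 *	crypto_ahash_export(req, state_buf);  // snapshot buf[] and caam_ctx[]
 *	...
 *	crypto_ahash_import(req, state_buf);  // restore the snapshot
 *	crypto_ahash_final(req);              // finish from the checkpoint
 */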
1563 struct caam_hash_template {
1564 char name[CRYPTO_MAX_ALG_NAME];
1565 char driver_name[CRYPTO_MAX_ALG_NAME];
1566 char hmac_name[CRYPTO_MAX_ALG_NAME];
1567 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1568 unsigned int blocksize;
1569 struct ahash_alg template_ahash;
1570 u32 alg_type;
1573 /* ahash descriptors */
1574 static struct caam_hash_template driver_hash[] = {
1576 .name = "sha1",
1577 .driver_name = "sha1-caam",
1578 .hmac_name = "hmac(sha1)",
1579 .hmac_driver_name = "hmac-sha1-caam",
1580 .blocksize = SHA1_BLOCK_SIZE,
1581 .template_ahash = {
1582 .init = ahash_init,
1583 .update = ahash_update,
1584 .final = ahash_final,
1585 .finup = ahash_finup,
1586 .digest = ahash_digest,
1587 .export = ahash_export,
1588 .import = ahash_import,
1589 .setkey = ahash_setkey,
1590 .halg = {
1591 .digestsize = SHA1_DIGEST_SIZE,
1592 .statesize = sizeof(struct caam_export_state),
1595 .alg_type = OP_ALG_ALGSEL_SHA1,
1596 }, {
1597 .name = "sha224",
1598 .driver_name = "sha224-caam",
1599 .hmac_name = "hmac(sha224)",
1600 .hmac_driver_name = "hmac-sha224-caam",
1601 .blocksize = SHA224_BLOCK_SIZE,
1602 .template_ahash = {
1603 .init = ahash_init,
1604 .update = ahash_update,
1605 .final = ahash_final,
1606 .finup = ahash_finup,
1607 .digest = ahash_digest,
1608 .export = ahash_export,
1609 .import = ahash_import,
1610 .setkey = ahash_setkey,
1611 .halg = {
1612 .digestsize = SHA224_DIGEST_SIZE,
1613 .statesize = sizeof(struct caam_export_state),
1616 .alg_type = OP_ALG_ALGSEL_SHA224,
1617 }, {
1618 .name = "sha256",
1619 .driver_name = "sha256-caam",
1620 .hmac_name = "hmac(sha256)",
1621 .hmac_driver_name = "hmac-sha256-caam",
1622 .blocksize = SHA256_BLOCK_SIZE,
1623 .template_ahash = {
1624 .init = ahash_init,
1625 .update = ahash_update,
1626 .final = ahash_final,
1627 .finup = ahash_finup,
1628 .digest = ahash_digest,
1629 .export = ahash_export,
1630 .import = ahash_import,
1631 .setkey = ahash_setkey,
1632 .halg = {
1633 .digestsize = SHA256_DIGEST_SIZE,
1634 .statesize = sizeof(struct caam_export_state),
1637 .alg_type = OP_ALG_ALGSEL_SHA256,
1638 }, {
1639 .name = "sha384",
1640 .driver_name = "sha384-caam",
1641 .hmac_name = "hmac(sha384)",
1642 .hmac_driver_name = "hmac-sha384-caam",
1643 .blocksize = SHA384_BLOCK_SIZE,
1644 .template_ahash = {
1645 .init = ahash_init,
1646 .update = ahash_update,
1647 .final = ahash_final,
1648 .finup = ahash_finup,
1649 .digest = ahash_digest,
1650 .export = ahash_export,
1651 .import = ahash_import,
1652 .setkey = ahash_setkey,
1653 .halg = {
1654 .digestsize = SHA384_DIGEST_SIZE,
1655 .statesize = sizeof(struct caam_export_state),
1658 .alg_type = OP_ALG_ALGSEL_SHA384,
1659 }, {
1660 .name = "sha512",
1661 .driver_name = "sha512-caam",
1662 .hmac_name = "hmac(sha512)",
1663 .hmac_driver_name = "hmac-sha512-caam",
1664 .blocksize = SHA512_BLOCK_SIZE,
1665 .template_ahash = {
1666 .init = ahash_init,
1667 .update = ahash_update,
1668 .final = ahash_final,
1669 .finup = ahash_finup,
1670 .digest = ahash_digest,
1671 .export = ahash_export,
1672 .import = ahash_import,
1673 .setkey = ahash_setkey,
1674 .halg = {
1675 .digestsize = SHA512_DIGEST_SIZE,
1676 .statesize = sizeof(struct caam_export_state),
1679 .alg_type = OP_ALG_ALGSEL_SHA512,
1680 }, {
1681 .name = "md5",
1682 .driver_name = "md5-caam",
1683 .hmac_name = "hmac(md5)",
1684 .hmac_driver_name = "hmac-md5-caam",
1685 .blocksize = MD5_BLOCK_WORDS * 4,
1686 .template_ahash = {
1687 .init = ahash_init,
1688 .update = ahash_update,
1689 .final = ahash_final,
1690 .finup = ahash_finup,
1691 .digest = ahash_digest,
1692 .export = ahash_export,
1693 .import = ahash_import,
1694 .setkey = ahash_setkey,
1695 .halg = {
1696 .digestsize = MD5_DIGEST_SIZE,
1697 .statesize = sizeof(struct caam_export_state),
1700 .alg_type = OP_ALG_ALGSEL_MD5,
1701 }, {
1702 .hmac_name = "xcbc(aes)",
1703 .hmac_driver_name = "xcbc-aes-caam",
1704 .blocksize = AES_BLOCK_SIZE,
1705 .template_ahash = {
1706 .init = ahash_init,
1707 .update = ahash_update,
1708 .final = ahash_final,
1709 .finup = ahash_finup,
1710 .digest = ahash_digest,
1711 .export = ahash_export,
1712 .import = ahash_import,
1713 .setkey = axcbc_setkey,
1714 .halg = {
1715 .digestsize = AES_BLOCK_SIZE,
1716 .statesize = sizeof(struct caam_export_state),
1719 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
1720 }, {
1721 .hmac_name = "cmac(aes)",
1722 .hmac_driver_name = "cmac-aes-caam",
1723 .blocksize = AES_BLOCK_SIZE,
1724 .template_ahash = {
1725 .init = ahash_init,
1726 .update = ahash_update,
1727 .final = ahash_final,
1728 .finup = ahash_finup,
1729 .digest = ahash_digest,
1730 .export = ahash_export,
1731 .import = ahash_import,
1732 .setkey = acmac_setkey,
1733 .halg = {
1734 .digestsize = AES_BLOCK_SIZE,
1735 .statesize = sizeof(struct caam_export_state),
1738 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
1742 struct caam_hash_alg {
1743 struct list_head entry;
1744 int alg_type;
1745 struct ahash_alg ahash_alg;
1748 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1750 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1751 struct crypto_alg *base = tfm->__crt_alg;
1752 struct hash_alg_common *halg =
1753 container_of(base, struct hash_alg_common, base);
1754 struct ahash_alg *alg =
1755 container_of(halg, struct ahash_alg, halg);
1756 struct caam_hash_alg *caam_hash =
1757 container_of(alg, struct caam_hash_alg, ahash_alg);
1758 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1759 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1760 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1761 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1762 HASH_MSG_LEN + 32,
1763 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1764 HASH_MSG_LEN + 64,
1765 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1766 const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
1767 sh_desc_update);
1768 dma_addr_t dma_addr;
1769 struct caam_drv_private *priv;
1772 * Get a Job ring from Job Ring driver to ensure in-order
1773 * crypto request processing per tfm
1775 ctx->jrdev = caam_jr_alloc();
1776 if (IS_ERR(ctx->jrdev)) {
1777 pr_err("Job Ring Device allocation for transform failed\n");
1778 return PTR_ERR(ctx->jrdev);
1781 priv = dev_get_drvdata(ctx->jrdev->parent);
1783 if (is_xcbc_aes(caam_hash->alg_type)) {
1784 ctx->dir = DMA_TO_DEVICE;
1785 ctx->key_dir = DMA_BIDIRECTIONAL;
1786 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1787 ctx->ctx_len = 48;
1788 } else if (is_cmac_aes(caam_hash->alg_type)) {
1789 ctx->dir = DMA_TO_DEVICE;
1790 ctx->key_dir = DMA_NONE;
1791 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1792 ctx->ctx_len = 32;
1793 } else {
1794 if (priv->era >= 6) {
1795 ctx->dir = DMA_BIDIRECTIONAL;
1796 ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
1797 } else {
1798 ctx->dir = DMA_TO_DEVICE;
1799 ctx->key_dir = DMA_NONE;
1801 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1802 ctx->ctx_len = runninglen[(ctx->adata.algtype &
1803 OP_ALG_ALGSEL_SUBMASK) >>
1804 OP_ALG_ALGSEL_SHIFT];
1807 if (ctx->key_dir != DMA_NONE) {
1808 ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1809 ARRAY_SIZE(ctx->key),
1810 ctx->key_dir,
1811 DMA_ATTR_SKIP_CPU_SYNC);
1812 if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
1813 dev_err(ctx->jrdev, "unable to map key\n");
1814 caam_jr_free(ctx->jrdev);
1815 return -ENOMEM;
1819 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1820 offsetof(struct caam_hash_ctx, key) -
1821 sh_desc_update_offset,
1822 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1823 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1824 dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1826 if (ctx->key_dir != DMA_NONE)
1827 dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1828 ARRAY_SIZE(ctx->key),
1829 ctx->key_dir,
1830 DMA_ATTR_SKIP_CPU_SYNC);
1832 caam_jr_free(ctx->jrdev);
1833 return -ENOMEM;
1836 ctx->sh_desc_update_dma = dma_addr;
1837 ctx->sh_desc_update_first_dma = dma_addr +
1838 offsetof(struct caam_hash_ctx,
1839 sh_desc_update_first) -
1840 sh_desc_update_offset;
1841 ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
1842 sh_desc_fin) -
1843 sh_desc_update_offset;
1844 ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
1845 sh_desc_digest) -
1846 sh_desc_update_offset;
1848 ctx->enginectx.op.do_one_request = ahash_do_one_req;
1850 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1851 sizeof(struct caam_hash_state));
1854 * For keyed hash algorithms shared descriptors
1855 * will be created later in setkey() callback
1857 return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
1860 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1862 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1864 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1865 offsetof(struct caam_hash_ctx, key) -
1866 offsetof(struct caam_hash_ctx, sh_desc_update),
1867 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1868 if (ctx->key_dir != DMA_NONE)
1869 dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1870 ARRAY_SIZE(ctx->key), ctx->key_dir,
1871 DMA_ATTR_SKIP_CPU_SYNC);
1872 caam_jr_free(ctx->jrdev);
1875 void caam_algapi_hash_exit(void)
1877 struct caam_hash_alg *t_alg, *n;
1879 if (!hash_list.next)
1880 return;
1882 list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1883 crypto_unregister_ahash(&t_alg->ahash_alg);
1884 list_del(&t_alg->entry);
1885 kfree(t_alg);
1889 static struct caam_hash_alg *
1890 caam_hash_alloc(struct caam_hash_template *template,
1891 bool keyed)
1893 struct caam_hash_alg *t_alg;
1894 struct ahash_alg *halg;
1895 struct crypto_alg *alg;
1897 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1898 if (!t_alg) {
1899 pr_err("failed to allocate t_alg\n");
1900 return ERR_PTR(-ENOMEM);
1903 t_alg->ahash_alg = template->template_ahash;
1904 halg = &t_alg->ahash_alg;
1905 alg = &halg->halg.base;
1907 if (keyed) {
1908 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1909 template->hmac_name);
1910 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1911 template->hmac_driver_name);
1912 } else {
1913 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1914 template->name);
1915 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1916 template->driver_name);
1917 t_alg->ahash_alg.setkey = NULL;
1919 alg->cra_module = THIS_MODULE;
1920 alg->cra_init = caam_hash_cra_init;
1921 alg->cra_exit = caam_hash_cra_exit;
1922 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1923 alg->cra_priority = CAAM_CRA_PRIORITY;
1924 alg->cra_blocksize = template->blocksize;
1925 alg->cra_alignmask = 0;
1926 alg->cra_flags = CRYPTO_ALG_ASYNC;
1928 t_alg->alg_type = template->alg_type;
1930 return t_alg;
1933 int caam_algapi_hash_init(struct device *ctrldev)
1935 int i = 0, err = 0;
1936 struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1937 unsigned int md_limit = SHA512_DIGEST_SIZE;
1938 u32 md_inst, md_vid;
1941 * Register crypto algorithms the device supports. First, identify
1942 * presence and attributes of MD block.
1944 if (priv->era < 10) {
1945 md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
1946 CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1947 md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
1948 CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1949 } else {
1950 u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
1952 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
1953 md_inst = mdha & CHA_VER_NUM_MASK;
1957 * Skip registration of any hashing algorithms if MD block
1958 * is not present.
1960 if (!md_inst)
1961 return 0;
1963 /* Limit digest size based on LP256 */
1964 if (md_vid == CHA_VER_VID_MD_LP256)
1965 md_limit = SHA256_DIGEST_SIZE;
1967 INIT_LIST_HEAD(&hash_list);
1969 /* register crypto algorithms the device supports */
1970 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1971 struct caam_hash_alg *t_alg;
1972 struct caam_hash_template *alg = driver_hash + i;
1974 /* If MD size is not supported by device, skip registration */
1975 if (is_mdha(alg->alg_type) &&
1976 alg->template_ahash.halg.digestsize > md_limit)
1977 continue;
1979 /* register hmac version */
1980 t_alg = caam_hash_alloc(alg, true);
1981 if (IS_ERR(t_alg)) {
1982 err = PTR_ERR(t_alg);
1983 pr_warn("%s alg allocation failed\n",
1984 alg->hmac_driver_name);
1985 continue;
1988 err = crypto_register_ahash(&t_alg->ahash_alg);
1989 if (err) {
1990 pr_warn("%s alg registration failed: %d\n",
1991 t_alg->ahash_alg.halg.base.cra_driver_name,
1992 err);
1993 kfree(t_alg);
1994 } else
1995 list_add_tail(&t_alg->entry, &hash_list);
1997 if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
1998 continue;
2000 /* register unkeyed version */
2001 t_alg = caam_hash_alloc(alg, false);
2002 if (IS_ERR(t_alg)) {
2003 err = PTR_ERR(t_alg);
2004 pr_warn("%s alg allocation failed\n", alg->driver_name);
2005 continue;
2008 err = crypto_register_ahash(&t_alg->ahash_alg);
2009 if (err) {
2010 pr_warn("%s alg registration failed: %d\n",
2011 t_alg->ahash_alg.halg.base.cra_driver_name,
2012 err);
2013 kfree(t_alg);
2014 } else
2015 list_add_tail(&t_alg->entry, &hash_list);
2018 return err;