// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #1  |-------------->|  ShareDesc  |
 * | *(packet 1) |               |  (hashKey)  |
 * ---------------               | (operation) |
 *                               ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #2  |-------------->|  ShareDesc  |
 * | *(packet 2) |      |------->|  (hashKey)  |
 * ---------------      |   |--->| (operation) |
 *       .              |   |    | (load ctx2) |
 *       .              |   |    ---------------
 * ---------------      |   |
 * | JobDesc #3  |------|   |
 * | *(packet 3) |          |
 * ---------------          |
 *       .                  |
 *       .                  |
 * ---------------          |
 * | JobDesc #4  |----------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

58 #include "compat.h"
60 #include "regs.h"
61 #include "intern.h"
62 #include "desc_constr.h"
63 #include "jr.h"
64 #include "error.h"
65 #include "sg_sw_sec4.h"
66 #include "key_gen.h"
67 #include "caamhash_desc.h"
69 #define CAAM_CRA_PRIORITY 3000
71 /* max hash key is max split key size */
72 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
74 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
75 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
77 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
78 CAAM_MAX_HASH_KEY_SIZE)
79 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
81 /* caam context sizes for hashes: running digest + 8 */
82 #define HASH_MSG_LEN 8
83 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
85 #ifdef DEBUG
86 /* for print_hex_dumps with line references */
87 #define debug(format, arg...) printk(format, arg)
88 #else
89 #define debug(format, arg...)
90 #endif
static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

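/*
 * buf_0/buf_1 in caam_hash_state double-buffer the not-yet-hashed tail of
 * the request data between update calls; the helpers below select the
 * active or alternate buffer/length, and switch_buf() flips them once a
 * job that consumed the current buffer completes.
 */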
static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

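/* true if the descriptor operates AES-CMAC (a class 1 algorithm) */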
static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

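/*
 * (Re)build the update, update_first, final and digest shared descriptors
 * for (HMAC-)hash algorithms and sync them so the device sees the new
 * versions.
 */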
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

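/*
 * Build the shared descriptors for AES-XCBC-MAC: the key is referenced by
 * DMA address for the UPDATE and FINALIZE states, and inlined as immediate
 * data for the INIT and INITFINAL states.
 */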
static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* key is loaded from memory for UPDATE and FINALIZE states */
	ctx->adata.key_dma = ctx->key_dma;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len, ctx->key_dma);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}

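/*
 * Same as above, for AES-CMAC: the key is immediate data in all four
 * shared descriptors.
 */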
static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

/* Digest the key if it is too long, reducing it to digestsize bytes */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req, dst_len);
}

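/*
 * Job ring completion callbacks: each unmaps the request's DMA resources,
 * copies out the digest and/or running context as appropriate, and
 * completes the request. The _bi and _ctx_dst variants also flip the
 * double buffer for the next update.
 */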
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

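/*
 * Append the request source to the job descriptor: data that spans more
 * than one segment (or follows other prepended entries) goes through a
 * sec4 S/G table, otherwise the single segment is referenced directly.
 */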
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

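/* final: flush the running context plus any buffered bytes into the digest */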
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

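/*
 * finup with a live context: hash the running context, the buffered bytes
 * and the remaining request data in a single job.
 */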
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

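/* single-shot digest: hash the whole request with the INITFINAL descriptor */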
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

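/*
 * Entry points exposed through ahash_alg. ahash_init() resets the state
 * and installs the "first job" handlers; as jobs are submitted, the
 * update/finup/final pointers advance to the context-based variants above.
 */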
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

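/*
 * export/import snapshot and restore the software hash state (buffered
 * data, running context and stage callbacks) for partial-hash handling.
 */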
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

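/*
 * tfm init: allocate a job ring for this transform, select the running
 * context length for the algorithm, and DMA-map the shared descriptors
 * (plus the key buffer for XCBC, which the device both reads and writes).
 */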
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;

		ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
						    ARRAY_SIZE(ctx->key),
						    DMA_BIDIRECTIONAL,
						    DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (is_xcbc_aes(caam_hash->alg_type))
			dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_BIDIRECTIONAL,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (is_xcbc_aes(ctx->adata.algtype))
		dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
				       ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	priv = dev_get_drvdata(&pdev->dev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv) {
		err = -ENODEV;
		goto out_put_dev;
	}

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst) {
		err = -ENODEV;
		goto out_put_dev;
	}

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

out_put_dev:
	put_device(&pdev->dev);
	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");