/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
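
/* List of hash algorithms registered with the crypto API by this module */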
static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	struct device *jrdev;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}
/*
 * Only put the buffer in the link table if it contains data; a previously
 * used buffer may first need to be unmapped.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
/*
 * For ahash update, final and finup: import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}
/* Digest the key if it is too long, reducing it to digestsize bytes */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto bad_free_key;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto error_free_key;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

 error_free_key:
	kfree(hashed_key);
	return ret;
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}
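
/*
 * Job ring completion callbacks. Each one recovers the extended descriptor
 * from the hardware descriptor address, reports any CAAM status code,
 * unmaps DMA and completes the request; they differ in whether and in which
 * direction the running context (state->caam_ctx) needs to be unmapped.
 */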
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
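
/*
 * Point the job descriptor's SEQ IN PTR at the request source: directly at
 * a single mapped segment, or at a DMA-mapped S/G table when more than one
 * entry (or a leading buffer entry) is needed.
 */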
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
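
/*
 * Update path note: only whole blocks are sent to CAAM; any remainder is
 * copied into the spare buffer (buf_0/buf_1 alternate between calls) and is
 * prepended to the data of the next update request.
 */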
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
			   &state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->dst_dma = 0;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
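
/* finup with no preceding update reduces to a one-shot digest */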
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
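
/*
 * Export/import the software-visible state (running context plus buffered
 * partial block) so a hash request can be suspended and later resumed.
 */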
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};
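
/* One-time tfm setup: acquire a job ring and build the shared descriptors */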
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");