drivers/crypto/caam/caamhash.c
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->| ShareDesc   |
 * | *(packet 1) |                     | (hashKey)   |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->| ShareDesc   |
 * | *(packet 2) |      |------------->| (hashKey)   |
 * ---------------      |      |------>| (operation) |
 *       .              |      |       | (load ctx2) |
 *       .              |      |       ---------------
 * ---------------      |      |
 * | JobDesc #3  |------|      |
 * | *(packet 3) |             |
 * ---------------             |
 *       .                     |
 *       .                     |
 * ---------------             |
 * | JobDesc #4  |-------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
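
/*
 * Note: the extra HASH_MSG_LEN bytes account for the 64-bit running
 * message length that the MDHA keeps alongside the digest in its context.
 */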

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
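
/*
 * buf_0/buf_1 are used as a ping-pong pair: current_buf() returns the
 * buffer holding the partial block carried into this request, alt_buf()
 * collects the sub-block remainder left over for the next request, and
 * switch_buf() flips the roles once a job completes.
 */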
static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/*
 * For ahash update, final and finup (import_ctx = true)
 *     import context, read and write to seqout
 * For ahash first and digest (import_ctx = false)
 *     read and write to seqout
 */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
				     struct caam_hash_ctx *ctx, bool import_ctx,
				     int era)
{
	u32 op = ctx->adata.algtype;
	u32 *skip_key_load;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Append key if it has been set; ahash update excluded */
	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
		/* Skip key loading if already shared */
		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					    JUMP_COND_SHRD);

		if (era < 6)
			append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
					  ctx->adata.keylen, CLASS_2 |
					  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		else
			append_proto_dkp(desc, &ctx->adata);

		set_jump_tgt_here(desc, skip_key_load);

		op |= OP_ALG_AAI_HMAC_PRECOMP;
	}

	/* If needed, import context from software */
	if (import_ctx)
		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
				LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 * Calculate remaining bytes to read
	 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);
	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
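
/*
 * For reference, the update shared descriptor built above comes out roughly
 * as follows (one line per CAAM command, mirroring the append_* calls):
 *
 *	HEADER		(serial sharing)
 *	SEQ LOAD	ctx_len bytes -> class 2 context (imported state)
 *	OPERATION	class 2, algorithm | AS_UPDATE
 *	MATH		VSIL = SIL + REG0 (bytes remaining in input seq)
 *	SEQ FIFO LOAD	message, variable length, LAST2
 *	SEQ STORE	digestsize bytes <- class 2 context
 */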

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
			  ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
			  ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
			  ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
			  ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

/* Digest the key when it is longer than the algorithm's block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
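
/*
 * sec4_sg[] is a zero-length trailing array: one allocation covers the
 * edesc bookkeeping, the job descriptor and the link table it references,
 * so the whole thing is released with a single kfree() in the done
 * callbacks.
 */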

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
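
/*
 * Append the job's SEQ IN PTR: if the input fits in a single mapped segment
 * and no extra entries are needed, point at it directly; otherwise build a
 * sec4 link table in the edesc and reference it with the SGF
 * (scatter/gather format) option set.
 */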
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
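
/*
 * The update path only ever feeds the hardware a multiple of the block
 * size: in_len is split into to_hash (sent to the CAAM together with the
 * imported running context) and next_buflen (the sub-block remainder,
 * copied into the alternate buffer for the next request).
 */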
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}
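
/*
 * The update/finup/final function pointers form a small state machine:
 * ahash_init() starts every request stream on the *_first variants; once a
 * full block has gone through the CAAM, the stream moves to the *_ctx
 * variants (running state lives in caam_ctx), while a stream whose data so
 * far fits entirely in the buffer moves to the *_no_ctx variants.
 */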
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
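
/*
 * For illustration only: a minimal sketch of how a user of the generic
 * crypto API ends up exercising these entry points (error and completion
 * handling omitted; "sha256-caam" is the cra_driver_name registered below):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256-caam", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, 0, my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, src_sg, digest, nbytes);
 *	crypto_ahash_digest(req);		// -> ahash_digest() above
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 *
 * my_done_cb/my_ctx are hypothetical caller-supplied completion hooks.
 */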

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
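	/*
	 * runninglen[] is indexed by (algtype & OP_ALG_ALGSEL_SUBMASK) >>
	 * OP_ALG_ALGSEL_SHIFT below, yielding 0..5 for MD5, SHA-1, SHA-224,
	 * SHA-256, SHA-384 and SHA-512 respectively. SHA-224 and SHA-384
	 * are truncated variants, so their internal running digests are the
	 * full 32 resp. 64 bytes of SHA-256/SHA-512.
	 */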
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");