/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
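/*
 * buf_0/buf_1 form a ping-pong pair selected by current_buf: input that
 * does not fill a whole hash block is parked in the current buffer and
 * prepended to the next request's data, while the other buffer collects
 * the new partial tail.
 */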
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				       struct caam_hash_state *state,
				       int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}
/*
 * Only put the buffer in the link table if it contains data; either way,
 * a previously mapped buffer may still need to be unmapped first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len,
				      struct sec4_sg_entry *sec4_sg,
				      u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
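/*
 * Five shared descriptors are kept per tfm: update (OP_ALG_AS_UPDATE,
 * context in and out), update_first (OP_ALG_AS_INIT, context out),
 * final/finup (OP_ALG_AS_FINALIZE, digest out) and digest
 * (OP_ALG_AS_INITFINAL, one-shot). Each is DMA-mapped once below and
 * then referenced by every job descriptor built for this session.
 */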
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
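/*
 * The MDHA "split key" produced by gen_split_key() is effectively the
 * pair of precomputed inner/outer HMAC pad states, which is why its
 * length is twice the pad size and why the shared descriptors load it
 * with KEY_DEST_MDHA_SPLIT | KEY_ENC.
 */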
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}
/* Digest the key down to digestsize bytes if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	*keylen = digestsize;

	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	kfree(desc);

	return ret;
}
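/*
 * As in RFC 2104, an HMAC key longer than the block size is first
 * reduced to digestsize bytes with hash_digest_key() above; the split
 * key is then generated from the digested key.
 */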
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		kfree(hashed_key);
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/**
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
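/*
 * hw_desc is a flexible array at the end of the allocation; callers place
 * the sec4 link table DESC_JOB_IO_LEN bytes past the edesc. The completion
 * callbacks use offsetof(struct ahash_edesc, hw_desc) to recover the edesc
 * from the descriptor address handed back by the job ring.
 */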
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
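/*
 * The three completion callbacks below follow the same pattern as
 * ahash_done(); they differ only in what gets unmapped: a bidirectional
 * context (ahash_done_bi), a context read together with the final digest
 * (ahash_done_ctx_src), or a context that was only written by the device
 * (ahash_done_ctx_dst).
 */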
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/* submit update job descriptor */
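/*
 * Only whole blocks are sent to CAAM: in_len is split into to_hash (a
 * block-size multiple taken from the accumulated buffer plus req->src)
 * and next_buflen (the remainder, copied into the other ping-pong
 * buffer for a later call).
 */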
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				   edesc->sec4_sg, DMA_BIDIRECTIONAL);

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen) {
				sg_copy_part(next_buf, req->src, to_hash -
					     *buflen, req->nbytes);
				state->current_buf = !state->current_buf;
			}
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = 0;

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* mark the last link-table entry (entry count, not byte count) */
	(edesc->sec4_sg + sec4_sg_bytes / sizeof(struct sec4_sg_entry) -
	 1)->len |= SEC4_SG_LEN_FIN;

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			  buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
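/*
 * The _no_ctx variants below run before any running context exists in
 * hardware; they therefore reuse the one-shot digest or update_first
 * shared descriptors and take their input only from the software buffer
 * and/or req->src.
 */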
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	edesc->src_nents = 0;
	/* no link table was allocated; keep ahash_unmap() from reading junk */
	edesc->sec4_sg_bytes = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			sg_copy_part(next_buf, req->src, to_hash - *buflen,
				     req->nbytes);
			state->current_buf = !state->current_buf;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->buf_0 + state->current_buf *
		       CAAM_MAX_HASH_BLOCK_SIZE;
	int *next_buflen = &state->buflen_0 + state->current_buf;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			sg_copy_part(next_buf, req->src, to_hash, req->nbytes);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		sg_copy(next_buf, req->src, req->nbytes);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;

	return 0;
}
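/*
 * ahash_update/finup/final just dispatch through the per-request state:
 * ahash_init() installs the *_first/no_ctx handlers, and once a first
 * block has produced a hardware context, ahash_update_first() and
 * ahash_update_no_ctx() switch the handlers over to the _ctx variants.
 */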
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));
	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));
	return 0;
}
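/*
 * export/import copy the whole software context and state verbatim; note
 * that the exported blob embeds DMA addresses and mapped shared
 * descriptors, so it is only meaningful within the same driver instance
 * that produced it.
 */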
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
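/*
 * Each template above is registered twice by caam_algapi_hash_init():
 * once under hmac_name as the keyed hmac(<alg>) variant and once under
 * name as the unkeyed hash.
 */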
struct caam_hash_alg {
	struct list_head entry;
	struct device *ctrldev;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int tgt_jr = atomic_inc_return(&priv->tfm_count);
	int ret = 0;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];

	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_hash_alg *t_alg, *n;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	if (!priv->hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	INIT_LIST_HEAD(&priv->hash_list);

	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");