/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
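/*
 * Example: for sha256 the running context is the 32-byte intermediate
 * digest plus the 8-byte running message length kept by the MDHA, i.e.
 * ctx_len = HASH_MSG_LEN + SHA256_DIGEST_SIZE = 40 bytes; MAX_CTX_LEN
 * (72 bytes) covers the largest case, sha512.
 */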
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				       struct caam_hash_state *state,
				       int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}
/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}
/*
 * Only put buffer in link table if it contains data, which is possible,
 * since a buffer has previously been used, and needs to be unmapped
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}
/* Map state->caam_ctx, and add it to link table */
static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len,
				      struct sec4_sg_entry *sec4_sg,
				      u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}
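/*
 * Summary of how the helpers above are combined later in this file: the
 * update/final/finup paths build one sec4_sg link table whose entries are,
 * in order, the running context (ctx_map_to_sec4_sg), the partial-block
 * buffer when it holds data (try_buf_map_to_sec4_sg) and finally the
 * entries describing req->src (src_map_to_sec4_sg); the last entry is
 * flagged so the DMA engine knows where the scatter list ends.
 */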
/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
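/*
 * Sketch of what the descriptors built above look like (inferred from the
 * commands appended by the helpers, using the digest case as an example):
 * shared header, an optional split-key load guarded by a shared-condition
 * jump, the class 2 ALGORITHM OPERATION, a MATH command computing the
 * variable input length, a SEQ FIFO LOAD of the remaining message bytes
 * and a SEQ STORE of digestsize context bytes to the output pointer.
 */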
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	*keylen = digestsize;

	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	kfree(desc);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
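	/*
	 * Worked example: for hmac(sha256) the MDHA pad length is 32, so
	 * split_key_len = 64 (ipad and opad halves) and split_key_pad_len
	 * stays 64 after aligning to 16; for hmac(sha1) it is 40, padded
	 * up to 48.
	 */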
#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_BIDIRECTIONAL);

		ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				   edesc->sec4_sg, DMA_BIDIRECTIONAL);

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen) {
				sg_copy_part(next_buf, req->src, to_hash -
					     *buflen, req->nbytes);
				state->current_buf = !state->current_buf;
			}
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				SEC4_SG_LEN_FIN;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = 0;

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			       buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			sg_copy_part(next_buf, req->src, to_hash - *buflen,
				     req->nbytes);
			state->current_buf = !state->current_buf;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->buf_0 + state->current_buf *
		       CAAM_MAX_HASH_BLOCK_SIZE;
	int *next_buflen = &state->buflen_0 + state->current_buf;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			sg_copy_part(next_buf, req->src, to_hash, req->nbytes);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		sg_copy(next_buf, req->src, req->nbytes);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
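/*
 * Summary of the per-request state machine set up by the functions above:
 * ahash_init() points state->update/finup/final at the "first"/"no_ctx"
 * variants; once a job has actually run and a hardware context exists,
 * ahash_update_first()/ahash_update_no_ctx() switch them over to the
 * "_ctx" variants so subsequent requests import and export the running
 * context through the shared descriptors built in ahash_set_sh_desc().
 */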
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;

	return 0;
}
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));
	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));
	return 0;
}
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	struct device *ctrldev;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int tgt_jr = atomic_inc_return(&priv->tfm_count);
	int ret = 0;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];

	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_hash_alg *t_alg, *n;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	if (!priv->hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	INIT_LIST_HEAD(&priv->hash_list);

	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");