/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #1  |-------------->|  ShareDesc  |
 * | *(packet 1) |               |  (hashKey)  |
 * ---------------               | (operation) |
 *                               ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #2  |-------------->|  ShareDesc  |
 * | *(packet 2) |      |------->|  (hashKey)  |
 * ---------------      |   |--->| (operation) |
 *       .              |   |    | (load ctx2) |
 *       .              |   |    ---------------
 * ---------------      |   |
 * | JobDesc #3  |------|   |
 * | *(packet 3) |          |
 * ---------------          |
 *       .                  |
 * ---------------          |
 * | JobDesc #4  |----------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "desc_constr.h"
#include "sg_sw_sec4.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	struct device *jrdev;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
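/*
 * Note on the buffering scheme above: buf_0/buf_1 form a ping-pong pair.
 * current_buf selects which buffer holds the partial block carried over from
 * the previous update call, while the other buffer collects the tail of the
 * current request, so the hardware is always fed whole algorithm blocks.
 */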
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
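/*
 * ahash_export()/ahash_import() below copy the running CAAM context and the
 * pending partial block into this structure, so a hash request can be
 * suspended and later resumed without further hardware interaction.
 */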
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}
/*
 * Only put buffer in link table if it contains data, which is possible,
 * since a buffer has previously been used, and needs to be unmapped,
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
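/*
 * The "split key" appended above is the HMAC ipad/opad pair that the CAAM
 * derives from the user key via gen_split_hash_key() below; KEY_ENC marks it
 * as stored in encrypted form and destined for the class 2 (MDHA) key
 * register.
 */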
/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);

	return 0;
}
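/*
 * ahash_set_sh_desc() above builds one shared descriptor per operation type
 * (update, update_first, final, finup, digest) and DMA-maps each once; the
 * per-request job descriptors built later only reference them through a
 * ShareDesc pointer, as described in the header comment.
 */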
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;

		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
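/*
 * hash_digest_key() implements the usual HMAC rule for over-long keys: a key
 * longer than the block size is first hashed down to digestsize bytes, and
 * *keylen is updated accordingly before the split key is generated.
 */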
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

	printk(KERN_ERR "keylen %d\n", keylen);

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;

		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;

		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto bad_free_key;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto bad_free_key;
	}

	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);

	ret = ahash_set_sh_desc(ahash);
	if (ret)
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	kfree(hashed_key);
	return ret;
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
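/*
 * The job descriptor (hw_desc) and its sec4 scatter/gather table share one
 * allocation: sec4_sg is a variable-length array that directly follows
 * hw_desc, sized by the sg_num argument of ahash_edesc_alloc() below.
 */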
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
		       digestsize, 1);

	req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
		       digestsize, 1);

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
		       digestsize, 1);

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
		       digestsize, 1);

	req->base.complete(&req->base, err);
}
/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
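/*
 * ahash_edesc_add_src() picks the cheapest SEQ IN PTR form: a single mapped
 * source segment with no prepended buffer is referenced directly, while
 * multiple segments (or a prepended context/buffer entry) go through a sec4
 * S/G table that is DMA-mapped here and flagged with LDST_SGF.
 */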
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}

	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
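/*
 * In ahash_update_ctx() above, anything smaller than a full block is not sent
 * to the hardware: the tail of the request is copied into next_buf and
 * prepended to the data of the following update/final/finup call, so the
 * running context only ever advances in whole blocks.
 */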
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;

	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}

	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
			   &state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}

	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}
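/*
 * ahash_init() arms the no-context entry points; once a first chunk has gone
 * through the hardware (ahash_update_first/ahash_update_no_ctx), the handlers
 * are switched to the *_ctx variants, which import the running context saved
 * in state->caam_ctx.
 */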
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA224_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA384_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");