/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
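/*
 * A condensed sketch of how that layout is realized below (these are the
 * same helper calls ahash_digest() makes; see that function for the real
 * code and error handling):
 *
 *	sh_len = desc_len(sh_desc);
 *	desc = edesc->hw_desc;
 *	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, req->nbytes, options);
 *	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 *						digestsize);
 *
 * i.e. a header pointing at the shared descriptor, then the SEQ IN/OUT
 * pointers for this particular packet.
 */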
60 #include "desc_constr.h"
63 #include "sg_sw_sec4.h"
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
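/*
 * Worked sizes (cf. the runninglen[] table in caam_hash_cra_init() below):
 * MD5 keeps a 16-byte running digest, so its context is 8 + 16 = 24 bytes;
 * SHA-256 needs 8 + 32 = 40 bytes; SHA-512 needs the full MAX_CTX_LEN of
 * 8 + 64 = 72 bytes.
 */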
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				       struct caam_hash_state *state,
				       int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc,
						struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}
/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}
/*
 * Only put the buffer in the link table if it contains data. Even when
 * empty, the buffer may still be mapped from an earlier pass and must be
 * unmapped before reuse.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}
/* Map state->caam_ctx, and add it to link table */
static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len,
				      struct sec4_sg_entry *sec4_sg,
				      u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}
/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip key loading if it has already been shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For ahash, read data from seqin following state->caam_ctx, and write
 * the resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result.
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
/*
 * For ahash update, final and finup: import context, read data and
 * write the result to seqout.
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}
/* Digest the key down to digestsize bytes if it is too long */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	/* unmap with the original key length before overwriting *keylen */
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
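/*
 * Worked example of the split key sizing in ahash_setkey() above: for
 * hmac(sha256) the MDHA pad length from mdpadlen[] is 32, so
 * split_key_len = 32 * 2 = 64 and split_key_pad_len = ALIGN(64, 16) = 64;
 * for hmac(md5) the pad length is 16, giving 16 * 2 = 32, padded to 32.
 */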
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
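/*
 * The allocators below all lay an ahash_edesc out in a single kmalloc
 * region, following this shared pattern (a sketch; see the functions
 * themselves for the exact sizes used):
 *
 *	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
 *			sec4_sg_bytes, GFP_DMA | flags);
 *	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 *			 DESC_JOB_IO_LEN;
 *
 * so hw_desc[] and its I/O commands occupy DESC_JOB_IO_LEN bytes directly
 * after the struct, and the sec4 link table follows them.
 */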
static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				   edesc->sec4_sg, DMA_BIDIRECTIONAL);

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen) {
				sg_copy_part(next_buf, req->src, to_hash -
					     *buflen, req->nbytes);
				state->current_buf = !state->current_buf;
			}
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = 0;

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* mark the last link-table entry (index by entries, not bytes) */
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize,
				DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			  buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize,
				DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	/* record table size so ahash_unmap() can undo the mapping */
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = 0;
	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			sg_copy_part(next_buf, req->src, to_hash - *buflen,
				     req->nbytes);
			state->current_buf = !state->current_buf;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->buf_0 + state->current_buf *
		       CAAM_MAX_HASH_BLOCK_SIZE;
	int *next_buflen = &state->buflen_0 + state->current_buf;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			sg_copy_part(next_buf, req->src, to_hash, req->nbytes);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		sg_copy(next_buf, req->src, req->nbytes);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;

	return 0;
}
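/*
 * State transitions, as a summary of the assignments made in ahash_init()
 * and the update paths above: a request starts with the *_first handlers;
 * once ahash_update_first() pushes a job to the engine it switches to the
 * *_ctx handlers, which carry the running digest in state->caam_ctx; if
 * data only accumulates in the s/w buffer, it falls back to the *_no_ctx
 * handlers instead.
 */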
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}
static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}
static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));

	return 0;
}
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int ret = 0;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template, bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	int i = 0, err = 0;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(&driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(&driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");