/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #1  |-------------->| ShareDesc   |
 * | *(packet 1) |               | (hashKey)   |
 * ---------------               | (operation) |
 *                               ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #2  |-------------->| ShareDesc   |
 * | *(packet 2) |      |------->| (hashKey)   |
 * ---------------      |   |--->| (operation) |
 *       .              |   |    | (load ctx2) |
 *       .              |   |    ---------------
 * ---------------      |   |
 * | JobDesc #3  |------|   |
 * | *(packet 3) |          |
 * ---------------          |
 *       .                  |
 *       .                  |
 * ---------------          |
 * | JobDesc #4  |----------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
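
/*
 * Worked example (informative, not driver code): for SHA-256 the saved
 * hardware context is the 8-byte running message length plus the 32-byte
 * running digest, i.e. 8 + 32 = 40 bytes; MAX_CTX_LEN just sizes the
 * buffer for the SHA-512 worst case, 8 + 64 = 72 bytes.
 */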
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	int ctx_len;
	struct alginfo adata;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}
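
/*
 * Informal sketch of the double-buffering above (assumes the completion
 * callback flips the buffers via switch_buf(), as ahash_done_bi() below
 * does). Bytes that do not fill a whole block are parked in the alternate
 * buffer while the hardware consumes the current one:
 *
 *	memcpy(alt_buf(state), leftover, n);	// stash partial block
 *	*alt_buflen(state) = n;
 *	...					// job submitted to CAAM
 *	switch_buf(state);			// callback: alt becomes current
 */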
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc,
						struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
/*
 * For ahash update, final and finup (import_ctx = true)
 *     import context, read and write to seqout
 * For ahash first and digest (import_ctx = false)
 *     read and write to seqout
 */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
				     struct caam_hash_ctx *ctx, bool import_ctx)
{
	u32 op = ctx->adata.algtype;
	u32 *skip_key_load;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Append key if it has been set; ahash update excluded */
	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
		/* Skip key loading if already shared */
		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					    JUMP_COND_SHRD);

		append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
				  ctx->adata.keylen, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);

		set_jump_tgt_here(desc, skip_key_load);

		op |= OP_ALG_AAI_HMAC_PRECOMP;
	}

	/* If needed, import context from software */
	if (import_ctx)
		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
				LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 * Calculate remaining bytes to read
	 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);
	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
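
/*
 * Reading aid (informative): for an HMAC finalize descriptor
 * (state == OP_ALG_AS_FINALIZE, import_ctx == true) the function above
 * emits roughly this command sequence:
 *
 *	SH HDR | JUMP (skip if key already shared) | KEY (split key, imm)
 *	SEQ LOAD (class 2 context) | OPERATION (class 2, finalize)
 *	MATH (VSIL = SIL) | SEQ FIFO LOAD (msg, VLF, LAST2)
 *	SEQ STORE (digest from class 2 context)
 *
 * The authoritative encoding is whatever the desc_constr.h inlines emit.
 */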
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
/* Digest the key if its size is too large to fit a hash block */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
			    CAAM_MAX_HASH_KEY_SIZE);
	if (ret)
		goto bad_free_key;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad, 1);
#endif

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
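
/*
 * Layout note (informative): ahash_edesc_alloc() below performs a single
 * kzalloc() of sizeof(struct ahash_edesc) + sg_num * sizeof(struct
 * sec4_sg_entry), so the hardware link table lands right after hw_desc[]
 * through the sec4_sg[0] flexible array member.
 */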
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
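
/*
 * Note on the branch above: a single mapped segment with no prepended
 * entry can feed the SEQ IN PTR directly; anything else goes through a
 * DMA-mapped sec4 S/G table with LDST_SGF set. Callers pass first_sg == 1
 * when entry 0 is reserved for the buffered partial block, as
 * ahash_finup_no_ctx() does below.
 */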
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
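
/*
 * Example caller (hypothetical, not part of this driver): the state
 * machine above is only exercised through the generic ahash API, roughly
 * like so for a two-part hash:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_priv);
 *	ahash_request_set_crypt(req, sg1, NULL, len1);
 *	crypto_ahash_init(req);		// -> ahash_init()
 *	crypto_ahash_update(req);	// -> state->update()
 *	ahash_request_set_crypt(req, sg2, digest, len2);
 *	crypto_ahash_finup(req);	// -> state->finup()
 *
 * Each call may return -EINPROGRESS and complete via my_done_cb.
 */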
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
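
/*
 * Export/import round trip (informative): the crypto API can save and
 * restore request state, e.g.
 *
 *	struct caam_export_state st;
 *
 *	crypto_ahash_export(req, &st);		// -> ahash_export()
 *	crypto_ahash_import(req2, &st);		// -> ahash_import()
 *
 * which is why .statesize below is sizeof(struct caam_export_state), and
 * why import always lands the buffered bytes in buf_0 with current_buf
 * reset to 0 by the memset().
 */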
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA224_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA384_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}
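
/*
 * Index arithmetic above, worked for SHA-256 (informative): the
 * OP_ALG_ALGSEL field of adata.algtype, masked and shifted, indexes
 * runninglen[], giving ctx->ctx_len = HASH_MSG_LEN + SHA256_DIGEST_SIZE
 * = 40 bytes of hardware context to save and restore between jobs.
 */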
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");