/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #1  |-------------->|  ShareDesc  |
 * | *(packet 1) |               |  (hashKey)  |
 * ---------------               | (operation) |
 *                               ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #2  |-------------->|  ShareDesc  |
 * | *(packet 2) |      |------->|  (hashKey)  |
 * ---------------      |   |--->| (operation) |
 *       .              |   |    | (load ctx2) |
 *       .              |   |    ---------------
 * ---------------      |   |
 * | JobDesc #3  |------|   |
 * ---------------          |
 *       .                  |
 * ---------------          |
 * | JobDesc #4  |----------|
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * ---------------------
 */
#include "desc_constr.h"
#include "sg_sw_sec4.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	int ctx_len;
	struct alginfo adata;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
/*
 * For ahash update, final and finup (import_ctx = true)
 *     import context, read and write to seqout
 * For ahash first and digest (import_ctx = false)
 *     read and write to seqout
 */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
				     struct caam_hash_ctx *ctx, bool import_ctx,
				     int era)
{
	u32 op = ctx->adata.algtype;
	u32 *skip_key_load;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Append key if it has been set; ahash update excluded */
	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
		/* Skip key loading if already shared */
		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					    JUMP_COND_SHRD);

		if (era < 6)
			append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
					  ctx->adata.keylen, CLASS_2 |
					  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		else
			append_proto_dkp(desc, &ctx->adata);

		set_jump_tgt_here(desc, skip_key_load);

		op |= OP_ALG_AAI_HMAC_PRECOMP;
	}

	/* If needed, import context from software */
	if (import_ctx)
		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
				LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 * Calculate remaining bytes to read
	 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);
	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
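
/*
 * (Re)build the four shared descriptors (update, update_first, final and
 * digest) from the current key/algorithm state and sync them to the device.
 */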
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
			  ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
			  ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
			  ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
			  ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
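
/*
 * HMAC setkey: keys longer than the block size are first hashed down to
 * digestsize, then either copied for DKP (Era >= 6) or turned into a split
 * key for older hardware, before the shared descriptors are regenerated.
 */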
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
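
/*
 * Point the job descriptor's SEQ IN PTR at the request source: either directly
 * at a single DMA-mapped segment, or at a freshly built sec4 link table when
 * more than one segment (or a leading buffer entry) is involved.
 */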
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
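
/*
 * submit final job descriptor: hash the imported running context plus any
 * bytes still buffered in caam_hash_state and write out req->result
 */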
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
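
/* submit one-shot digest job descriptor: hash the whole request in one pass */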
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
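
/*
 * ahash entry points: init selects the "first" handlers, while update, finup
 * and final simply dispatch through the function pointers kept in
 * caam_hash_state, which later requests retarget as the hash progresses.
 */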
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
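
/*
 * One template per digest algorithm; both the unkeyed and the hmac() variant
 * registered with the crypto API are derived from it.
 */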
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
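
/*
 * Per-tfm setup: take a job ring from the JR driver, DMA-map the block of
 * shared descriptors and derive the running-context length for the algorithm.
 */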
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
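
/*
 * Module init: locate the SEC controller node, check that an MD block is
 * present, then register the hmac and unkeyed ahash variants the hardware
 * can actually support.
 */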
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports.  First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");