/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #1  |-------------->|  ShareDesc  |
 * | *(packet 1) |               |  (hashKey)  |
 * ---------------               | (operation) |
 *                               ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #2  |-------------->|  ShareDesc  |
 * | *(packet 2) |      |------->|  (hashKey)  |
 * ---------------      |    |-->| (operation) |
 *       .              |    |   | (load ctx2) |
 *       .              |    |   ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
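/*
 * Worked example (illustrative): for SHA-256 the running digest is 32 bytes,
 * so the per-session context length is HASH_MSG_LEN + SHA256_DIGEST_SIZE =
 * 8 + 32 = 40 bytes, well within MAX_CTX_LEN = 8 + 64 = 72 bytes.
 */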
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	struct alginfo adata;
};
/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}
/*
 * Only put the buffer in the link table if it contains data; a buffer that
 * was used previously may still need to be unmapped first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
/*
 * For ahash update, final and finup (import_ctx = true):
 *     import context, read and write to seqout
 * For ahash first and digest (import_ctx = false):
 *     read and write to seqout
 */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
				     struct caam_hash_ctx *ctx, bool import_ctx)
{
	u32 op = ctx->adata.algtype;
	u32 *skip_key_load;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Append key if it has been set; ahash update excluded */
	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
		/* Skip key loading if already shared */
		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					    JUMP_COND_SHRD);

		append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
				  ctx->adata.keylen, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);

		set_jump_tgt_here(desc, skip_key_load);

		op |= OP_ALG_AAI_HMAC_PRECOMP;
	}

	/* If needed, import context from software */
	if (import_ctx)
		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
				LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 * Calculate remaining bytes to read
	 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);
	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
/* Digest the key if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
			    CAAM_MAX_HASH_KEY_SIZE);
	if (ret)
		goto bad_free_key;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto error_free_key;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret)
		dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
				 DMA_TO_DEVICE);

 error_free_key:
	kfree(hashed_key);
	return ret;
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		desc = edesc->hw_desc;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		state->current_buf = !state->current_buf;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
			   &state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->dst_dma = 0;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->ctx_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}
static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}
static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};
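/*
 * Minimal usage sketch (illustrative, assuming a generic kernel crypto API
 * caller; not part of this driver): once the algorithms above are registered,
 * they are reached through the standard ahash interface, e.g.:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, src_sg, digest_buf, nbytes);
 *	crypto_ahash_digest(req);
 *
 * my_done_cb, my_ctx, src_sg and digest_buf are placeholder names.
 */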
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template, bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports.  First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");