/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "desc_constr.h"
#include "sg_sw_sec4.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
    struct device *jrdev;
    u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
    u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
    u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
    u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
    u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
    dma_addr_t sh_desc_update_dma;
    dma_addr_t sh_desc_update_first_dma;
    dma_addr_t sh_desc_fin_dma;
    dma_addr_t sh_desc_digest_dma;
    dma_addr_t sh_desc_finup_dma;
    u32 alg_type;
    u32 alg_op;
    u8 key[CAAM_MAX_HASH_KEY_SIZE];
    dma_addr_t key_dma;
    int ctx_len;
    unsigned int split_key_len;
    unsigned int split_key_pad_len;
};
/* ahash state */
struct caam_hash_state {
    dma_addr_t buf_dma;
    dma_addr_t ctx_dma;
    u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
    int buflen_0;
    u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
    int buflen_1;
    u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
    int (*update)(struct ahash_request *req);
    int (*final)(struct ahash_request *req);
    int (*finup)(struct ahash_request *req);
    int current_buf;
};
struct caam_export_state {
    u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
    u8 caam_ctx[MAX_CTX_LEN];
    int buflen;
    int (*update)(struct ahash_request *req);
    int (*final)(struct ahash_request *req);
    int (*finup)(struct ahash_request *req);
};
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
                                      struct caam_hash_state *state,
                                      int ctx_len)
{
    state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
                                    ctx_len, DMA_FROM_DEVICE);
    if (dma_mapping_error(jrdev, state->ctx_dma)) {
        dev_err(jrdev, "unable to map ctx\n");
        return -ENOMEM;
    }

    append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

    return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc,
                                                struct device *jrdev,
                                                u8 *result, int digestsize)
{
    dma_addr_t dst_dma;

    dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
    append_seq_out_ptr(desc, dst_dma, digestsize, 0);

    return dst_dma;
}
/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
                                            struct sec4_sg_entry *sec4_sg,
                                            u8 *buf, int buflen)
{
    dma_addr_t buf_dma;

    buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
    dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

    return buf_dma;
}
/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
                                      struct scatterlist *src, int src_nents,
                                      struct sec4_sg_entry *sec4_sg)
{
    dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
    sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}
/*
 * Only put buffer in link table if it contains data, which is possible,
 * since a buffer has previously been used and may need to be unmapped.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
                       u8 *buf, dma_addr_t buf_dma, int buflen,
                       int last_buflen)
{
    if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
        dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
    if (buflen)
        buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
    else
        buf_dma = 0;

    return buf_dma;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
                                     struct caam_hash_state *state,
                                     int ctx_len,
                                     struct sec4_sg_entry *sec4_sg, u32 flag)
{
    state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
    if (dma_mapping_error(jrdev, state->ctx_dma)) {
        dev_err(jrdev, "unable to map ctx\n");
        return -ENOMEM;
    }

    dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

    return 0;
}
/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
    append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
                      ctx->split_key_len, CLASS_2 |
                      KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
    u32 *key_jump_cmd;

    init_sh_desc(desc, HDR_SHARE_SERIAL);

    if (ctx->split_key_len) {
        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);

        append_key_ahash(desc, ctx);

        set_jump_tgt_here(desc, key_jump_cmd);
    }

    /* Propagate errors from shared to job descriptor */
    append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
    /* Calculate remaining bytes to read */
    append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

    /* Read remaining bytes */
    append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
                         FIFOLD_TYPE_MSG | KEY_VLF);

    /* Store class2 context bytes */
    append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
                     LDST_SRCDST_BYTE_CONTEXT);
}
/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
                                         int digestsize,
                                         struct caam_hash_ctx *ctx)
{
    init_sh_desc_key_ahash(desc, ctx);

    /* Import context from software */
    append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
               LDST_CLASS_2_CCB | ctx->ctx_len);

    /* Class 2 operation */
    append_operation(desc, op | state | OP_ALG_ENCRYPT);

    /*
     * Load from buf and/or src and write to req->result or state->context
     */
    ahash_append_load_str(desc, digestsize);
}
/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
                                     int digestsize,
                                     struct caam_hash_ctx *ctx)
{
    init_sh_desc_key_ahash(desc, ctx);

    /* Class 2 operation */
    append_operation(desc, op | state | OP_ALG_ENCRYPT);

    /*
     * Load from buf and/or src and write to req->result or state->context
     */
    ahash_append_load_str(desc, digestsize);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    int digestsize = crypto_ahash_digestsize(ahash);
    struct device *jrdev = ctx->jrdev;
    u32 have_key = 0;
    u32 *desc;

    if (ctx->split_key_len)
        have_key = OP_ALG_AAI_HMAC_PRECOMP;

    /* ahash_update shared descriptor */
    desc = ctx->sh_desc_update;

    init_sh_desc(desc, HDR_SHARE_SERIAL);

    /* Import context from software */
    append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
               LDST_CLASS_2_CCB | ctx->ctx_len);

    /* Class 2 operation */
    append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
                     OP_ALG_ENCRYPT);

    /* Load data and write to result or context */
    ahash_append_load_str(desc, ctx->ctx_len);

    ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
                                             DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
        dev_err(jrdev, "unable to map shared descriptor\n");
        return -ENOMEM;
    }
#ifdef DEBUG
    print_hex_dump(KERN_ERR,
                   "ahash update shdesc@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

    /* ahash_update_first shared descriptor */
    desc = ctx->sh_desc_update_first;

    ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
                      ctx->ctx_len, ctx);

    ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
                                                   desc_bytes(desc),
                                                   DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
        dev_err(jrdev, "unable to map shared descriptor\n");
        return -ENOMEM;
    }
#ifdef DEBUG
    print_hex_dump(KERN_ERR,
                   "ahash update first shdesc@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

    /* ahash_final shared descriptor */
    desc = ctx->sh_desc_fin;

    ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
                          OP_ALG_AS_FINALIZE, digestsize, ctx);

    ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
                                          DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
        dev_err(jrdev, "unable to map shared descriptor\n");
        return -ENOMEM;
    }
#ifdef DEBUG
    print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, desc,
                   desc_bytes(desc), 1);
#endif

    /* ahash_finup shared descriptor */
    desc = ctx->sh_desc_finup;

    ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
                          OP_ALG_AS_FINALIZE, digestsize, ctx);

    ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
                                            DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
        dev_err(jrdev, "unable to map shared descriptor\n");
        return -ENOMEM;
    }
#ifdef DEBUG
    print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, desc,
                   desc_bytes(desc), 1);
#endif

    /* ahash_digest shared descriptor */
    desc = ctx->sh_desc_digest;

    ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
                      digestsize, ctx);

    ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
                                             desc_bytes(desc),
                                             DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
        dev_err(jrdev, "unable to map shared descriptor\n");
        return -ENOMEM;
    }
#ifdef DEBUG
    print_hex_dump(KERN_ERR,
                   "ahash digest shdesc@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, desc,
                   desc_bytes(desc), 1);
#endif

    return 0;
}
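/*
 * To summarize, ahash_set_sh_desc() leaves the session with five prebuilt
 * shared descriptors: sh_desc_update (UPDATE on an imported context),
 * sh_desc_update_first (INIT, no context yet), sh_desc_fin and sh_desc_finup
 * (FINALIZE on an imported context, producing the digest), and
 * sh_desc_digest (INITFINAL over a whole request). The per-request job
 * descriptors built below only reference one of these and supply the data
 * and result buffers.
 */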
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
                              u32 keylen)
{
    return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
                         ctx->split_key_pad_len, key_in, keylen,
                         ctx->alg_op);
}
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
                           u32 *keylen, u8 *key_out, u32 digestsize)
{
    struct device *jrdev = ctx->jrdev;
    u32 *desc;
    struct split_key_result result;
    dma_addr_t src_dma, dst_dma;
    int ret;

    desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
    if (!desc) {
        dev_err(jrdev, "unable to allocate key input memory\n");
        return -ENOMEM;
    }

    init_job_desc(desc, 0);

    src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
                             DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, src_dma)) {
        dev_err(jrdev, "unable to map key input memory\n");
        kfree(desc);
        return -ENOMEM;
    }
    dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
                             DMA_FROM_DEVICE);
    if (dma_mapping_error(jrdev, dst_dma)) {
        dev_err(jrdev, "unable to map key output memory\n");
        dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
        kfree(desc);
        return -ENOMEM;
    }

    /* Job descriptor to perform unkeyed hash on key_in */
    append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
                     OP_ALG_AS_INITFINAL);
    append_seq_in_ptr(desc, src_dma, *keylen, 0);
    append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
                         FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
    append_seq_out_ptr(desc, dst_dma, digestsize, 0);
    append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
                     LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
    print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
    print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

    result.err = 0;
    init_completion(&result.completion);

    ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
    if (!ret) {
        /* in progress */
        wait_for_completion(&result.completion);
        ret = result.err;
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "digested key@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
                       digestsize, 1);
#endif
    }
    dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
    dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

    *keylen = digestsize;

    kfree(desc);

    return ret;
}
static int ahash_setkey(struct crypto_ahash *ahash,
                        const u8 *key, unsigned int keylen)
{
    /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
    static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct device *jrdev = ctx->jrdev;
    int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
    int digestsize = crypto_ahash_digestsize(ahash);
    int ret;
    u8 *hashed_key = NULL;

#ifdef DEBUG
    printk(KERN_ERR "keylen %d\n", keylen);
#endif

    if (keylen > blocksize) {
        hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
                             GFP_DMA);
        if (!hashed_key)
            return -ENOMEM;
        ret = hash_digest_key(ctx, key, &keylen, hashed_key,
                              digestsize);
        if (ret)
            goto bad_free_key;
        key = hashed_key;
    }

    /* Pick class 2 key length from algorithm submask */
    ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
                                  OP_ALG_ALGSEL_SHIFT] * 2;
    ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
    printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
           ctx->split_key_len, ctx->split_key_pad_len);
    print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

    ret = gen_split_hash_key(ctx, key, keylen);
    if (ret)
        goto bad_free_key;

    ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
                                  DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, ctx->key_dma)) {
        dev_err(jrdev, "unable to map key i/o memory\n");
        ret = -ENOMEM;
        goto error_free_key;
    }
#ifdef DEBUG
    print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
                   ctx->split_key_pad_len, 1);
#endif

    ret = ahash_set_sh_desc(ahash);
    if (ret)
        dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
                         DMA_TO_DEVICE);

error_free_key:
    kfree(hashed_key);
    return ret;
bad_free_key:
    kfree(hashed_key);
    crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
    return -EINVAL;
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
    dma_addr_t dst_dma;
    dma_addr_t sec4_sg_dma;
    int src_nents;
    int sec4_sg_bytes;
    struct sec4_sg_entry *sec4_sg;
    u32 hw_desc[0];
};

static inline void ahash_unmap(struct device *dev,
                               struct ahash_edesc *edesc,
                               struct ahash_request *req, int dst_len)
{
    if (edesc->src_nents)
        dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
    if (edesc->dst_dma)
        dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

    if (edesc->sec4_sg_bytes)
        dma_unmap_single(dev, edesc->sec4_sg_dma,
                         edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
                                   struct ahash_edesc *edesc,
                                   struct ahash_request *req, int dst_len,
                                   u32 flag)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);

    if (state->ctx_dma)
        dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
    ahash_unmap(dev, edesc, req, dst_len);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
                       void *context)
{
    struct ahash_request *req = context;
    struct ahash_edesc *edesc;
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);

    dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

    edesc = (struct ahash_edesc *)((char *)desc -
             offsetof(struct ahash_edesc, hw_desc));
    if (err)
        caam_jr_strstatus(jrdev, err);

    ahash_unmap(jrdev, edesc, req, digestsize);
    kfree(edesc);

#ifdef DEBUG
    print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                   ctx->ctx_len, 1);
    if (req->result)
        print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                       digestsize, 1);
#endif

    req->base.complete(&req->base, err);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
                          void *context)
{
    struct ahash_request *req = context;
    struct ahash_edesc *edesc;
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
    struct caam_hash_state *state = ahash_request_ctx(req);
    int digestsize = crypto_ahash_digestsize(ahash);

    dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

    edesc = (struct ahash_edesc *)((char *)desc -
             offsetof(struct ahash_edesc, hw_desc));
    if (err)
        caam_jr_strstatus(jrdev, err);

    ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
    kfree(edesc);

#ifdef DEBUG
    print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                   ctx->ctx_len, 1);
    if (req->result)
        print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                       digestsize, 1);
#endif

    req->base.complete(&req->base, err);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
{
    struct ahash_request *req = context;
    struct ahash_edesc *edesc;
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);

    dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

    edesc = (struct ahash_edesc *)((char *)desc -
             offsetof(struct ahash_edesc, hw_desc));
    if (err)
        caam_jr_strstatus(jrdev, err);

    ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
    kfree(edesc);

#ifdef DEBUG
    print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                   ctx->ctx_len, 1);
    if (req->result)
        print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                       digestsize, 1);
#endif

    req->base.complete(&req->base, err);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
{
    struct ahash_request *req = context;
    struct ahash_edesc *edesc;
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
    struct caam_hash_state *state = ahash_request_ctx(req);
    int digestsize = crypto_ahash_digestsize(ahash);

    dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

    edesc = (struct ahash_edesc *)((char *)desc -
             offsetof(struct ahash_edesc, hw_desc));
    if (err)
        caam_jr_strstatus(jrdev, err);

    ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
    kfree(edesc);

#ifdef DEBUG
    print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                   ctx->ctx_len, 1);
    if (req->result)
        print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                       digestsize, 1);
#endif

    req->base.complete(&req->base, err);
}
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                   CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
    u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
    int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
    u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
    int *next_buflen = state->current_buf ? &state->buflen_0 :
                       &state->buflen_1, last_buflen;
    int in_len = *buflen + req->nbytes, to_hash;
    u32 *sh_desc = ctx->sh_desc_update, *desc;
    dma_addr_t ptr = ctx->sh_desc_update_dma;
    int src_nents, sec4_sg_bytes, sec4_sg_src_index;
    struct ahash_edesc *edesc;
    int ret = 0;
    int sh_len;

    last_buflen = *next_buflen;
    *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
    to_hash = in_len - *next_buflen;

    if (to_hash) {
        src_nents = sg_nents_for_len(req->src,
                                     req->nbytes - (*next_buflen));
        sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
        sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
                         sizeof(struct sec4_sg_entry);

        /*
         * allocate space for base edesc and hw desc commands,
         * link tables
         */
        edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
                        sec4_sg_bytes, GFP_DMA | flags);
        if (!edesc) {
            dev_err(jrdev,
                    "could not allocate extended descriptor\n");
            return -ENOMEM;
        }

        edesc->src_nents = src_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                         DESC_JOB_IO_LEN;

        ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
                                 edesc->sec4_sg, DMA_BIDIRECTIONAL);
        if (ret)
            return ret;

        state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
                                                edesc->sec4_sg + 1,
                                                buf, state->buf_dma,
                                                *buflen, last_buflen);

        if (src_nents) {
            src_map_to_sec4_sg(jrdev, req->src, src_nents,
                               edesc->sec4_sg + sec4_sg_src_index);
            if (*next_buflen)
                scatterwalk_map_and_copy(next_buf, req->src,
                                         to_hash - *buflen,
                                         *next_buflen, 0);
        } else {
            (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
                SEC4_SG_LEN_FIN;
        }

        state->current_buf = !state->current_buf;

        sh_len = desc_len(sh_desc);
        desc = edesc->hw_desc;
        init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
                             HDR_REVERSE);

        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes,
                                            DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
            dev_err(jrdev, "unable to map S/G table\n");
            return -ENOMEM;
        }

        append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                          to_hash, LDST_SGF);

        append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
        if (!ret) {
            ret = -EINPROGRESS;
        } else {
            ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
                            DMA_BIDIRECTIONAL);
            kfree(edesc);
        }
    } else if (*next_buflen) {
        scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                 req->nbytes, 0);
        *buflen = *next_buflen;
        *next_buflen = last_buflen;
    }
#ifdef DEBUG
    print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
    print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
                   *next_buflen, 1);
#endif

    return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                   CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
    u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
    int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
    int last_buflen = state->current_buf ? state->buflen_0 :
                      state->buflen_1;
    u32 *sh_desc = ctx->sh_desc_fin, *desc;
    dma_addr_t ptr = ctx->sh_desc_fin_dma;
    int sec4_sg_bytes, sec4_sg_src_index;
    int digestsize = crypto_ahash_digestsize(ahash);
    struct ahash_edesc *edesc;
    int ret = 0;
    int sh_len;

    sec4_sg_src_index = 1 + (buflen ? 1 : 0);
    sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

    /* allocate space for base edesc and hw desc commands, link tables */
    edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
                    GFP_DMA | flags);
    if (!edesc) {
        dev_err(jrdev, "could not allocate extended descriptor\n");
        return -ENOMEM;
    }

    sh_len = desc_len(sh_desc);
    desc = edesc->hw_desc;
    init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

    edesc->sec4_sg_bytes = sec4_sg_bytes;
    edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                     DESC_JOB_IO_LEN;
    edesc->src_nents = 0;

    ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
                             edesc->sec4_sg, DMA_TO_DEVICE);
    if (ret)
        return ret;

    state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                            buf, state->buf_dma, buflen,
                                            last_buflen);
    (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

    edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                        sec4_sg_bytes, DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
        dev_err(jrdev, "unable to map S/G table\n");
        return -ENOMEM;
    }

    append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
                      LDST_SGF);

    edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                            digestsize);
    if (dma_mapping_error(jrdev, edesc->dst_dma)) {
        dev_err(jrdev, "unable to map dst\n");
        return -ENOMEM;
    }

#ifdef DEBUG
    print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

    ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
    if (!ret) {
        ret = -EINPROGRESS;
    } else {
        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
        kfree(edesc);
    }

    return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                   CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
    u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
    int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
    int last_buflen = state->current_buf ? state->buflen_0 :
                      state->buflen_1;
    u32 *sh_desc = ctx->sh_desc_finup, *desc;
    dma_addr_t ptr = ctx->sh_desc_finup_dma;
    int sec4_sg_bytes, sec4_sg_src_index;
    int src_nents;
    int digestsize = crypto_ahash_digestsize(ahash);
    struct ahash_edesc *edesc;
    int ret = 0;
    int sh_len;

    src_nents = sg_nents_for_len(req->src, req->nbytes);
    sec4_sg_src_index = 1 + (buflen ? 1 : 0);
    sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
                     sizeof(struct sec4_sg_entry);

    /* allocate space for base edesc and hw desc commands, link tables */
    edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
                    GFP_DMA | flags);
    if (!edesc) {
        dev_err(jrdev, "could not allocate extended descriptor\n");
        return -ENOMEM;
    }

    sh_len = desc_len(sh_desc);
    desc = edesc->hw_desc;
    init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

    edesc->src_nents = src_nents;
    edesc->sec4_sg_bytes = sec4_sg_bytes;
    edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                     DESC_JOB_IO_LEN;

    ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
                             edesc->sec4_sg, DMA_TO_DEVICE);
    if (ret)
        return ret;

    state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                            buf, state->buf_dma, buflen,
                                            last_buflen);

    src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
                       sec4_sg_src_index);

    edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                        sec4_sg_bytes, DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
        dev_err(jrdev, "unable to map S/G table\n");
        return -ENOMEM;
    }

    append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                      buflen + req->nbytes, LDST_SGF);

    edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                            digestsize);
    if (dma_mapping_error(jrdev, edesc->dst_dma)) {
        dev_err(jrdev, "unable to map dst\n");
        return -ENOMEM;
    }

#ifdef DEBUG
    print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

    ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
    if (!ret) {
        ret = -EINPROGRESS;
    } else {
        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
        kfree(edesc);
    }

    return ret;
}
static int ahash_digest(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct device *jrdev = ctx->jrdev;
    gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                   CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
    u32 *sh_desc = ctx->sh_desc_digest, *desc;
    dma_addr_t ptr = ctx->sh_desc_digest_dma;
    int digestsize = crypto_ahash_digestsize(ahash);
    int src_nents, sec4_sg_bytes;
    dma_addr_t src_dma;
    struct ahash_edesc *edesc;
    int ret = 0;
    u32 options;
    int sh_len;

    src_nents = sg_count(req->src, req->nbytes);
    dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
    sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

    /* allocate space for base edesc and hw desc commands, link tables */
    edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
                    GFP_DMA | flags);
    if (!edesc) {
        dev_err(jrdev, "could not allocate extended descriptor\n");
        return -ENOMEM;
    }
    edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                     DESC_JOB_IO_LEN;
    edesc->sec4_sg_bytes = sec4_sg_bytes;
    edesc->src_nents = src_nents;

    sh_len = desc_len(sh_desc);
    desc = edesc->hw_desc;
    init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

    if (src_nents) {
        sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
            dev_err(jrdev, "unable to map S/G table\n");
            return -ENOMEM;
        }
        src_dma = edesc->sec4_sg_dma;
        options = LDST_SGF;
    } else {
        src_dma = sg_dma_address(req->src);
        options = 0;
    }
    append_seq_in_ptr(desc, src_dma, req->nbytes, options);

    edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                            digestsize);
    if (dma_mapping_error(jrdev, edesc->dst_dma)) {
        dev_err(jrdev, "unable to map dst\n");
        return -ENOMEM;
    }

#ifdef DEBUG
    print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

    ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
    if (!ret) {
        ret = -EINPROGRESS;
    } else {
        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);
    }

    return ret;
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                   CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
    u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
    int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
    u32 *sh_desc = ctx->sh_desc_digest, *desc;
    dma_addr_t ptr = ctx->sh_desc_digest_dma;
    int digestsize = crypto_ahash_digestsize(ahash);
    struct ahash_edesc *edesc;
    int ret = 0;
    int sh_len;

    /* allocate space for base edesc and hw desc commands, link tables */
    edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
    if (!edesc) {
        dev_err(jrdev, "could not allocate extended descriptor\n");
        return -ENOMEM;
    }

    edesc->sec4_sg_bytes = 0;
    sh_len = desc_len(sh_desc);
    desc = edesc->hw_desc;
    init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

    state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, state->buf_dma)) {
        dev_err(jrdev, "unable to map src\n");
        return -ENOMEM;
    }

    append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

    edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                            digestsize);
    if (dma_mapping_error(jrdev, edesc->dst_dma)) {
        dev_err(jrdev, "unable to map dst\n");
        return -ENOMEM;
    }
    edesc->src_nents = 0;

#ifdef DEBUG
    print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

    ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
    if (!ret) {
        ret = -EINPROGRESS;
    } else {
        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);
    }

    return ret;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                   CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
    u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
    int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
    u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
    int *next_buflen = state->current_buf ? &state->buflen_0 :
                       &state->buflen_1;
    int in_len = *buflen + req->nbytes, to_hash;
    int sec4_sg_bytes, src_nents;
    struct ahash_edesc *edesc;
    u32 *desc, *sh_desc = ctx->sh_desc_update_first;
    dma_addr_t ptr = ctx->sh_desc_update_first_dma;
    int ret = 0;
    int sh_len;

    *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
    to_hash = in_len - *next_buflen;

    if (to_hash) {
        src_nents = sg_nents_for_len(req->src,
                                     req->nbytes - (*next_buflen));
        sec4_sg_bytes = (1 + src_nents) *
                        sizeof(struct sec4_sg_entry);

        /*
         * allocate space for base edesc and hw desc commands,
         * link tables
         */
        edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
                        sec4_sg_bytes, GFP_DMA | flags);
        if (!edesc) {
            dev_err(jrdev,
                    "could not allocate extended descriptor\n");
            return -ENOMEM;
        }

        edesc->src_nents = src_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                         DESC_JOB_IO_LEN;

        state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
                                            buf, *buflen);
        src_map_to_sec4_sg(jrdev, req->src, src_nents,
                           edesc->sec4_sg + 1);
        if (*next_buflen)
            scatterwalk_map_and_copy(next_buf, req->src,
                                     to_hash - *buflen,
                                     *next_buflen, 0);

        state->current_buf = !state->current_buf;

        sh_len = desc_len(sh_desc);
        desc = edesc->hw_desc;
        init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
                             HDR_REVERSE);

        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes,
                                            DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
            dev_err(jrdev, "unable to map S/G table\n");
            return -ENOMEM;
        }

        append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

        ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
        if (ret)
            return ret;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
        if (!ret) {
            ret = -EINPROGRESS;
            state->update = ahash_update_ctx;
            state->finup = ahash_finup_ctx;
            state->final = ahash_final_ctx;
        } else {
            ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
                            DMA_TO_DEVICE);
            kfree(edesc);
        }
    } else if (*next_buflen) {
        scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                 req->nbytes, 0);
        *buflen = *next_buflen;
        *next_buflen = 0;
    }
#ifdef DEBUG
    print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
    print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
                   *next_buflen, 1);
#endif

    return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                   CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
    u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
    int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
    int last_buflen = state->current_buf ? state->buflen_0 :
                      state->buflen_1;
    u32 *sh_desc = ctx->sh_desc_digest, *desc;
    dma_addr_t ptr = ctx->sh_desc_digest_dma;
    int sec4_sg_bytes, sec4_sg_src_index, src_nents;
    int digestsize = crypto_ahash_digestsize(ahash);
    struct ahash_edesc *edesc;
    int sh_len;
    int ret = 0;

    src_nents = sg_nents_for_len(req->src, req->nbytes);
    sec4_sg_src_index = 2;
    sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
                     sizeof(struct sec4_sg_entry);

    /* allocate space for base edesc and hw desc commands, link tables */
    edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
                    GFP_DMA | flags);
    if (!edesc) {
        dev_err(jrdev, "could not allocate extended descriptor\n");
        return -ENOMEM;
    }

    sh_len = desc_len(sh_desc);
    desc = edesc->hw_desc;
    init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

    edesc->src_nents = src_nents;
    edesc->sec4_sg_bytes = sec4_sg_bytes;
    edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                     DESC_JOB_IO_LEN;

    state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
                                            state->buf_dma, buflen,
                                            last_buflen);

    src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);

    edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                        sec4_sg_bytes, DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
        dev_err(jrdev, "unable to map S/G table\n");
        return -ENOMEM;
    }

    append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
                      req->nbytes, LDST_SGF);

    edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                            digestsize);
    if (dma_mapping_error(jrdev, edesc->dst_dma)) {
        dev_err(jrdev, "unable to map dst\n");
        return -ENOMEM;
    }

#ifdef DEBUG
    print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

    ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
    if (!ret) {
        ret = -EINPROGRESS;
    } else {
        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);
    }

    return ret;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                   CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
    u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
    int *next_buflen = state->current_buf ?
        &state->buflen_1 : &state->buflen_0;
    int to_hash;
    u32 *sh_desc = ctx->sh_desc_update_first, *desc;
    dma_addr_t ptr = ctx->sh_desc_update_first_dma;
    int sec4_sg_bytes, src_nents;
    dma_addr_t src_dma;
    u32 options;
    struct ahash_edesc *edesc;
    int ret = 0;
    int sh_len;

    *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
                                  1);
    to_hash = req->nbytes - *next_buflen;

    if (to_hash) {
        src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
        dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
        sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

        /*
         * allocate space for base edesc and hw desc commands,
         * link tables
         */
        edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
                        sec4_sg_bytes, GFP_DMA | flags);
        if (!edesc) {
            dev_err(jrdev,
                    "could not allocate extended descriptor\n");
            return -ENOMEM;
        }

        edesc->src_nents = src_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                         DESC_JOB_IO_LEN;

        if (src_nents) {
            sg_to_sec4_sg_last(req->src, src_nents,
                               edesc->sec4_sg, 0);
            edesc->sec4_sg_dma = dma_map_single(jrdev,
                                                edesc->sec4_sg,
                                                sec4_sg_bytes,
                                                DMA_TO_DEVICE);
            if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                dev_err(jrdev, "unable to map S/G table\n");
                return -ENOMEM;
            }
            src_dma = edesc->sec4_sg_dma;
            options = LDST_SGF;
        } else {
            src_dma = sg_dma_address(req->src);
            options = 0;
        }

        if (*next_buflen)
            scatterwalk_map_and_copy(next_buf, req->src, to_hash,
                                     *next_buflen, 0);

        sh_len = desc_len(sh_desc);
        desc = edesc->hw_desc;
        init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
                             HDR_REVERSE);

        append_seq_in_ptr(desc, src_dma, to_hash, options);

        ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
        if (ret)
            return ret;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
                              req);
        if (!ret) {
            ret = -EINPROGRESS;
            state->update = ahash_update_ctx;
            state->finup = ahash_finup_ctx;
            state->final = ahash_final_ctx;
        } else {
            ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
                            DMA_TO_DEVICE);
            kfree(edesc);
        }
    } else if (*next_buflen) {
        state->update = ahash_update_no_ctx;
        state->finup = ahash_finup_no_ctx;
        state->final = ahash_final_no_ctx;
        scatterwalk_map_and_copy(next_buf, req->src, 0,
                                 req->nbytes, 0);
    }
#ifdef DEBUG
    print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
                   DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
                   *next_buflen, 1);
#endif

    return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
    return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
    struct caam_hash_state *state = ahash_request_ctx(req);

    state->update = ahash_update_first;
    state->finup = ahash_finup_first;
    state->final = ahash_final_no_ctx;

    state->current_buf = 0;
    state->buf_dma = 0;
    state->buflen_0 = 0;
    state->buflen_1 = 0;

    return 0;
}

static int ahash_update(struct ahash_request *req)
{
    struct caam_hash_state *state = ahash_request_ctx(req);

    return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
    struct caam_hash_state *state = ahash_request_ctx(req);

    return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
    struct caam_hash_state *state = ahash_request_ctx(req);

    return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct caam_export_state *export = out;
    int len;
    u8 *buf;

    if (state->current_buf) {
        buf = state->buf_1;
        len = state->buflen_1;
    } else {
        buf = state->buf_0;
        len = state->buflen_0;
    }

    memcpy(export->buf, buf, len);
    memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
    export->buflen = len;
    export->update = state->update;
    export->final = state->final;
    export->finup = state->finup;

    return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
    struct caam_hash_state *state = ahash_request_ctx(req);
    const struct caam_export_state *export = in;

    memset(state, 0, sizeof(*state));
    memcpy(state->buf_0, export->buf, export->buflen);
    memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
    state->buflen_0 = export->buflen;
    state->update = export->update;
    state->final = export->final;
    state->finup = export->finup;

    return 0;
}
struct caam_hash_template {
    char name[CRYPTO_MAX_ALG_NAME];
    char driver_name[CRYPTO_MAX_ALG_NAME];
    char hmac_name[CRYPTO_MAX_ALG_NAME];
    char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
    unsigned int blocksize;
    struct ahash_alg template_ahash;
    u32 alg_type;
    u32 alg_op;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
    {
        .name = "sha1",
        .driver_name = "sha1-caam",
        .hmac_name = "hmac(sha1)",
        .hmac_driver_name = "hmac-sha1-caam",
        .blocksize = SHA1_BLOCK_SIZE,
        .template_ahash = {
            .init = ahash_init,
            .update = ahash_update,
            .final = ahash_final,
            .finup = ahash_finup,
            .digest = ahash_digest,
            .export = ahash_export,
            .import = ahash_import,
            .setkey = ahash_setkey,
            .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct caam_export_state),
            },
        },
        .alg_type = OP_ALG_ALGSEL_SHA1,
        .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
    }, {
        .name = "sha224",
        .driver_name = "sha224-caam",
        .hmac_name = "hmac(sha224)",
        .hmac_driver_name = "hmac-sha224-caam",
        .blocksize = SHA224_BLOCK_SIZE,
        .template_ahash = {
            .init = ahash_init,
            .update = ahash_update,
            .final = ahash_final,
            .finup = ahash_finup,
            .digest = ahash_digest,
            .export = ahash_export,
            .import = ahash_import,
            .setkey = ahash_setkey,
            .halg = {
                .digestsize = SHA224_DIGEST_SIZE,
                .statesize = sizeof(struct caam_export_state),
            },
        },
        .alg_type = OP_ALG_ALGSEL_SHA224,
        .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
    }, {
        .name = "sha256",
        .driver_name = "sha256-caam",
        .hmac_name = "hmac(sha256)",
        .hmac_driver_name = "hmac-sha256-caam",
        .blocksize = SHA256_BLOCK_SIZE,
        .template_ahash = {
            .init = ahash_init,
            .update = ahash_update,
            .final = ahash_final,
            .finup = ahash_finup,
            .digest = ahash_digest,
            .export = ahash_export,
            .import = ahash_import,
            .setkey = ahash_setkey,
            .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct caam_export_state),
            },
        },
        .alg_type = OP_ALG_ALGSEL_SHA256,
        .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
    }, {
        .name = "sha384",
        .driver_name = "sha384-caam",
        .hmac_name = "hmac(sha384)",
        .hmac_driver_name = "hmac-sha384-caam",
        .blocksize = SHA384_BLOCK_SIZE,
        .template_ahash = {
            .init = ahash_init,
            .update = ahash_update,
            .final = ahash_final,
            .finup = ahash_finup,
            .digest = ahash_digest,
            .export = ahash_export,
            .import = ahash_import,
            .setkey = ahash_setkey,
            .halg = {
                .digestsize = SHA384_DIGEST_SIZE,
                .statesize = sizeof(struct caam_export_state),
            },
        },
        .alg_type = OP_ALG_ALGSEL_SHA384,
        .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
    }, {
        .name = "sha512",
        .driver_name = "sha512-caam",
        .hmac_name = "hmac(sha512)",
        .hmac_driver_name = "hmac-sha512-caam",
        .blocksize = SHA512_BLOCK_SIZE,
        .template_ahash = {
            .init = ahash_init,
            .update = ahash_update,
            .final = ahash_final,
            .finup = ahash_finup,
            .digest = ahash_digest,
            .export = ahash_export,
            .import = ahash_import,
            .setkey = ahash_setkey,
            .halg = {
                .digestsize = SHA512_DIGEST_SIZE,
                .statesize = sizeof(struct caam_export_state),
            },
        },
        .alg_type = OP_ALG_ALGSEL_SHA512,
        .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
    }, {
        .name = "md5",
        .driver_name = "md5-caam",
        .hmac_name = "hmac(md5)",
        .hmac_driver_name = "hmac-md5-caam",
        .blocksize = MD5_BLOCK_WORDS * 4,
        .template_ahash = {
            .init = ahash_init,
            .update = ahash_update,
            .final = ahash_final,
            .finup = ahash_finup,
            .digest = ahash_digest,
            .export = ahash_export,
            .import = ahash_import,
            .setkey = ahash_setkey,
            .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct caam_export_state),
            },
        },
        .alg_type = OP_ALG_ALGSEL_MD5,
        .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
    },
};
struct caam_hash_alg {
    struct list_head entry;
    int alg_type;
    int alg_op;
    struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
    struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
    struct crypto_alg *base = tfm->__crt_alg;
    struct hash_alg_common *halg =
        container_of(base, struct hash_alg_common, base);
    struct ahash_alg *alg =
        container_of(halg, struct ahash_alg, halg);
    struct caam_hash_alg *caam_hash =
        container_of(alg, struct caam_hash_alg, ahash_alg);
    struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
    /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
    static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
                                     HASH_MSG_LEN + SHA1_DIGEST_SIZE,
                                     HASH_MSG_LEN + SHA224_DIGEST_SIZE,
                                     HASH_MSG_LEN + SHA256_DIGEST_SIZE,
                                     HASH_MSG_LEN + SHA384_DIGEST_SIZE,
                                     HASH_MSG_LEN + SHA512_DIGEST_SIZE };
    int ret = 0;

    /*
     * Get a Job ring from Job Ring driver to ensure in-order
     * crypto request processing per tfm
     */
    ctx->jrdev = caam_jr_alloc();
    if (IS_ERR(ctx->jrdev)) {
        pr_err("Job Ring Device allocation for transform failed\n");
        return PTR_ERR(ctx->jrdev);
    }
    /* copy descriptor header template value */
    ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
    ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

    ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
                              OP_ALG_ALGSEL_SHIFT];

    crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                             sizeof(struct caam_hash_state));

    ret = ahash_set_sh_desc(ahash);

    return ret;
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
    struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

    if (ctx->sh_desc_update_dma &&
        !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
        dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
                         desc_bytes(ctx->sh_desc_update),
                         DMA_TO_DEVICE);
    if (ctx->sh_desc_update_first_dma &&
        !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
        dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
                         desc_bytes(ctx->sh_desc_update_first),
                         DMA_TO_DEVICE);
    if (ctx->sh_desc_fin_dma &&
        !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
        dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
                         desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
    if (ctx->sh_desc_digest_dma &&
        !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
        dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
                         desc_bytes(ctx->sh_desc_digest),
                         DMA_TO_DEVICE);
    if (ctx->sh_desc_finup_dma &&
        !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
        dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
                         desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

    caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_hash_exit(void)
{
    struct caam_hash_alg *t_alg, *n;

    if (!hash_list.next)
        return;

    list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
        crypto_unregister_ahash(&t_alg->ahash_alg);
        list_del(&t_alg->entry);
        kfree(t_alg);
    }
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
                bool keyed)
{
    struct caam_hash_alg *t_alg;
    struct ahash_alg *halg;
    struct crypto_alg *alg;

    t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
    if (!t_alg) {
        pr_err("failed to allocate t_alg\n");
        return ERR_PTR(-ENOMEM);
    }

    t_alg->ahash_alg = template->template_ahash;
    halg = &t_alg->ahash_alg;
    alg = &halg->halg.base;

    if (keyed) {
        snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
                 template->hmac_name);
        snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 template->hmac_driver_name);
    } else {
        snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
                 template->name);
        snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 template->driver_name);
        t_alg->ahash_alg.setkey = NULL;
    }
    alg->cra_module = THIS_MODULE;
    alg->cra_init = caam_hash_cra_init;
    alg->cra_exit = caam_hash_cra_exit;
    alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
    alg->cra_priority = CAAM_CRA_PRIORITY;
    alg->cra_blocksize = template->blocksize;
    alg->cra_alignmask = 0;
    alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
    alg->cra_type = &crypto_ahash_type;

    t_alg->alg_type = template->alg_type;
    t_alg->alg_op = template->alg_op;

    return t_alg;
}
static int __init caam_algapi_hash_init(void)
{
    struct device_node *dev_node;
    struct platform_device *pdev;
    struct device *ctrldev;
    int i = 0, err = 0;
    struct caam_drv_private *priv;
    unsigned int md_limit = SHA512_DIGEST_SIZE;
    u32 cha_inst, cha_vid;

    dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
    if (!dev_node) {
        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
        if (!dev_node)
            return -ENODEV;
    }

    pdev = of_find_device_by_node(dev_node);
    if (!pdev) {
        of_node_put(dev_node);
        return -ENODEV;
    }

    ctrldev = &pdev->dev;
    priv = dev_get_drvdata(ctrldev);
    of_node_put(dev_node);

    /*
     * If priv is NULL, it's probably because the caam driver wasn't
     * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
     */
    if (!priv)
        return -ENODEV;

    /*
     * Register crypto algorithms the device supports. First, identify
     * presence and attributes of MD block.
     */
    cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
    cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

    /*
     * Skip registration of any hashing algorithms if MD block
     * is not present.
     */
    if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
        return -ENODEV;

    /* Limit digest size based on LP256 */
    if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
        md_limit = SHA256_DIGEST_SIZE;

    INIT_LIST_HEAD(&hash_list);

    /* register crypto algorithms the device supports */
    for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
        struct caam_hash_alg *t_alg;
        struct caam_hash_template *alg = driver_hash + i;

        /* If MD size is not supported by device, skip registration */
        if (alg->template_ahash.halg.digestsize > md_limit)
            continue;

        /* register hmac version */
        t_alg = caam_hash_alloc(alg, true);
        if (IS_ERR(t_alg)) {
            err = PTR_ERR(t_alg);
            pr_warn("%s alg allocation failed\n", alg->driver_name);
            continue;
        }

        err = crypto_register_ahash(&t_alg->ahash_alg);
        if (err) {
            pr_warn("%s alg registration failed: %d\n",
                    t_alg->ahash_alg.halg.base.cra_driver_name,
                    err);
            kfree(t_alg);
        } else
            list_add_tail(&t_alg->entry, &hash_list);

        /* register unkeyed version */
        t_alg = caam_hash_alloc(alg, false);
        if (IS_ERR(t_alg)) {
            err = PTR_ERR(t_alg);
            pr_warn("%s alg allocation failed\n", alg->driver_name);
            continue;
        }

        err = crypto_register_ahash(&t_alg->ahash_alg);
        if (err) {
            pr_warn("%s alg registration failed: %d\n",
                    t_alg->ahash_alg.halg.base.cra_driver_name,
                    err);
            kfree(t_alg);
        } else
            list_add_tail(&t_alg->entry, &hash_list);
    }

    return err;
}
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");