1 // SPDX-License-Identifier: GPL-2.0+
3 * caam - Freescale FSL CAAM support for ahash functions of crypto API
5 * Copyright 2011 Freescale Semiconductor, Inc.
6 * Copyright 2018-2019 NXP
8 * Based on caamalg.c crypto API driver.
10 * relationship of digest job descriptor or first job descriptor after init to
13 * --------------- ---------------
14 * | JobDesc #1 |-------------------->| ShareDesc |
15 * | *(packet 1) | | (hashKey) |
16 * --------------- | (operation) |
19 * relationship of subsequent job descriptors to shared descriptors:
21 * --------------- ---------------
22 * | JobDesc #2 |-------------------->| ShareDesc |
23 * | *(packet 2) | |------------->| (hashKey) |
24 * --------------- | |-------->| (operation) |
25 * . | | | (load ctx2) |
26 * . | | ---------------
28 * | JobDesc #3 |------| |
34 * | JobDesc #4 |------------
38 * The SharedDesc never changes for a connection unless rekeyed, but
39 * each packet will likely be in a different place. So all we need
40 * to know to process the packet is where the input is, where the
41 * output goes, and what context we want to process with. Context is
42 * in the SharedDesc, packet references in the JobDesc.
44 * So, a job desc looks like:
46 * ---------------------
48 * | ShareDesc Pointer |
55 * ---------------------
62 #include "desc_constr.h"
65 #include "sg_sw_sec4.h"
67 #include "caamhash_desc.h"
68 #include <crypto/engine.h>
70 #define CAAM_CRA_PRIORITY 3000
72 /* max hash key is max split key size */
73 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
75 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
76 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
78 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
79 CAAM_MAX_HASH_KEY_SIZE)
80 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
82 /* caam context sizes for hashes: running digest + 8 */
83 #define HASH_MSG_LEN 8
84 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
86 static struct list_head hash_list
;
88 /* ahash per-session context */
89 struct caam_hash_ctx
{
90 struct crypto_engine_ctx enginectx
;
91 u32 sh_desc_update
[DESC_HASH_MAX_USED_LEN
] ____cacheline_aligned
;
92 u32 sh_desc_update_first
[DESC_HASH_MAX_USED_LEN
] ____cacheline_aligned
;
93 u32 sh_desc_fin
[DESC_HASH_MAX_USED_LEN
] ____cacheline_aligned
;
94 u32 sh_desc_digest
[DESC_HASH_MAX_USED_LEN
] ____cacheline_aligned
;
95 u8 key
[CAAM_MAX_HASH_KEY_SIZE
] ____cacheline_aligned
;
96 dma_addr_t sh_desc_update_dma ____cacheline_aligned
;
97 dma_addr_t sh_desc_update_first_dma
;
98 dma_addr_t sh_desc_fin_dma
;
99 dma_addr_t sh_desc_digest_dma
;
100 enum dma_data_direction dir
;
101 enum dma_data_direction key_dir
;
102 struct device
*jrdev
;
104 struct alginfo adata
;
108 struct caam_hash_state
{
112 u8 buf
[CAAM_MAX_HASH_BLOCK_SIZE
] ____cacheline_aligned
;
115 u8 caam_ctx
[MAX_CTX_LEN
] ____cacheline_aligned
;
116 int (*update
)(struct ahash_request
*req
) ____cacheline_aligned
;
117 int (*final
)(struct ahash_request
*req
);
118 int (*finup
)(struct ahash_request
*req
);
119 struct ahash_edesc
*edesc
;
120 void (*ahash_op_done
)(struct device
*jrdev
, u32
*desc
, u32 err
,
124 struct caam_export_state
{
125 u8 buf
[CAAM_MAX_HASH_BLOCK_SIZE
];
126 u8 caam_ctx
[MAX_CTX_LEN
];
128 int (*update
)(struct ahash_request
*req
);
129 int (*final
)(struct ahash_request
*req
);
130 int (*finup
)(struct ahash_request
*req
);
133 static inline bool is_cmac_aes(u32 algtype
)
135 return (algtype
& (OP_ALG_ALGSEL_MASK
| OP_ALG_AAI_MASK
)) ==
136 (OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CMAC
);
138 /* Common job descriptor seq in/out ptr routines */
140 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
141 static inline int map_seq_out_ptr_ctx(u32
*desc
, struct device
*jrdev
,
142 struct caam_hash_state
*state
,
145 state
->ctx_dma_len
= ctx_len
;
146 state
->ctx_dma
= dma_map_single(jrdev
, state
->caam_ctx
,
147 ctx_len
, DMA_FROM_DEVICE
);
148 if (dma_mapping_error(jrdev
, state
->ctx_dma
)) {
149 dev_err(jrdev
, "unable to map ctx\n");
154 append_seq_out_ptr(desc
, state
->ctx_dma
, ctx_len
, 0);
159 /* Map current buffer in state (if length > 0) and put it in link table */
160 static inline int buf_map_to_sec4_sg(struct device
*jrdev
,
161 struct sec4_sg_entry
*sec4_sg
,
162 struct caam_hash_state
*state
)
164 int buflen
= state
->buflen
;
169 state
->buf_dma
= dma_map_single(jrdev
, state
->buf
, buflen
,
171 if (dma_mapping_error(jrdev
, state
->buf_dma
)) {
172 dev_err(jrdev
, "unable to map buf\n");
177 dma_to_sec4_sg_one(sec4_sg
, state
->buf_dma
, buflen
, 0);
182 /* Map state->caam_ctx, and add it to link table */
183 static inline int ctx_map_to_sec4_sg(struct device
*jrdev
,
184 struct caam_hash_state
*state
, int ctx_len
,
185 struct sec4_sg_entry
*sec4_sg
, u32 flag
)
187 state
->ctx_dma_len
= ctx_len
;
188 state
->ctx_dma
= dma_map_single(jrdev
, state
->caam_ctx
, ctx_len
, flag
);
189 if (dma_mapping_error(jrdev
, state
->ctx_dma
)) {
190 dev_err(jrdev
, "unable to map ctx\n");
195 dma_to_sec4_sg_one(sec4_sg
, state
->ctx_dma
, ctx_len
, 0);
200 static int ahash_set_sh_desc(struct crypto_ahash
*ahash
)
202 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
203 int digestsize
= crypto_ahash_digestsize(ahash
);
204 struct device
*jrdev
= ctx
->jrdev
;
205 struct caam_drv_private
*ctrlpriv
= dev_get_drvdata(jrdev
->parent
);
208 ctx
->adata
.key_virt
= ctx
->key
;
210 /* ahash_update shared descriptor */
211 desc
= ctx
->sh_desc_update
;
212 cnstr_shdsc_ahash(desc
, &ctx
->adata
, OP_ALG_AS_UPDATE
, ctx
->ctx_len
,
213 ctx
->ctx_len
, true, ctrlpriv
->era
);
214 dma_sync_single_for_device(jrdev
, ctx
->sh_desc_update_dma
,
215 desc_bytes(desc
), ctx
->dir
);
217 print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__
)": ",
218 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
),
221 /* ahash_update_first shared descriptor */
222 desc
= ctx
->sh_desc_update_first
;
223 cnstr_shdsc_ahash(desc
, &ctx
->adata
, OP_ALG_AS_INIT
, ctx
->ctx_len
,
224 ctx
->ctx_len
, false, ctrlpriv
->era
);
225 dma_sync_single_for_device(jrdev
, ctx
->sh_desc_update_first_dma
,
226 desc_bytes(desc
), ctx
->dir
);
227 print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__
)
228 ": ", DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
229 desc_bytes(desc
), 1);
231 /* ahash_final shared descriptor */
232 desc
= ctx
->sh_desc_fin
;
233 cnstr_shdsc_ahash(desc
, &ctx
->adata
, OP_ALG_AS_FINALIZE
, digestsize
,
234 ctx
->ctx_len
, true, ctrlpriv
->era
);
235 dma_sync_single_for_device(jrdev
, ctx
->sh_desc_fin_dma
,
236 desc_bytes(desc
), ctx
->dir
);
238 print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__
)": ",
239 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
240 desc_bytes(desc
), 1);
242 /* ahash_digest shared descriptor */
243 desc
= ctx
->sh_desc_digest
;
244 cnstr_shdsc_ahash(desc
, &ctx
->adata
, OP_ALG_AS_INITFINAL
, digestsize
,
245 ctx
->ctx_len
, false, ctrlpriv
->era
);
246 dma_sync_single_for_device(jrdev
, ctx
->sh_desc_digest_dma
,
247 desc_bytes(desc
), ctx
->dir
);
249 print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__
)": ",
250 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
251 desc_bytes(desc
), 1);
256 static int axcbc_set_sh_desc(struct crypto_ahash
*ahash
)
258 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
259 int digestsize
= crypto_ahash_digestsize(ahash
);
260 struct device
*jrdev
= ctx
->jrdev
;
263 /* shared descriptor for ahash_update */
264 desc
= ctx
->sh_desc_update
;
265 cnstr_shdsc_sk_hash(desc
, &ctx
->adata
, OP_ALG_AS_UPDATE
,
266 ctx
->ctx_len
, ctx
->ctx_len
);
267 dma_sync_single_for_device(jrdev
, ctx
->sh_desc_update_dma
,
268 desc_bytes(desc
), ctx
->dir
);
269 print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__
)" : ",
270 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
),
273 /* shared descriptor for ahash_{final,finup} */
274 desc
= ctx
->sh_desc_fin
;
275 cnstr_shdsc_sk_hash(desc
, &ctx
->adata
, OP_ALG_AS_FINALIZE
,
276 digestsize
, ctx
->ctx_len
);
277 dma_sync_single_for_device(jrdev
, ctx
->sh_desc_fin_dma
,
278 desc_bytes(desc
), ctx
->dir
);
279 print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__
)" : ",
280 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
),
283 /* key is immediate data for INIT and INITFINAL states */
284 ctx
->adata
.key_virt
= ctx
->key
;
286 /* shared descriptor for first invocation of ahash_update */
287 desc
= ctx
->sh_desc_update_first
;
288 cnstr_shdsc_sk_hash(desc
, &ctx
->adata
, OP_ALG_AS_INIT
, ctx
->ctx_len
,
290 dma_sync_single_for_device(jrdev
, ctx
->sh_desc_update_first_dma
,
291 desc_bytes(desc
), ctx
->dir
);
292 print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__
)
293 " : ", DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
294 desc_bytes(desc
), 1);
296 /* shared descriptor for ahash_digest */
297 desc
= ctx
->sh_desc_digest
;
298 cnstr_shdsc_sk_hash(desc
, &ctx
->adata
, OP_ALG_AS_INITFINAL
,
299 digestsize
, ctx
->ctx_len
);
300 dma_sync_single_for_device(jrdev
, ctx
->sh_desc_digest_dma
,
301 desc_bytes(desc
), ctx
->dir
);
302 print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__
)" : ",
303 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
),
308 static int acmac_set_sh_desc(struct crypto_ahash
*ahash
)
310 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
311 int digestsize
= crypto_ahash_digestsize(ahash
);
312 struct device
*jrdev
= ctx
->jrdev
;
315 /* shared descriptor for ahash_update */
316 desc
= ctx
->sh_desc_update
;
317 cnstr_shdsc_sk_hash(desc
, &ctx
->adata
, OP_ALG_AS_UPDATE
,
318 ctx
->ctx_len
, ctx
->ctx_len
);
319 dma_sync_single_for_device(jrdev
, ctx
->sh_desc_update_dma
,
320 desc_bytes(desc
), ctx
->dir
);
321 print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__
)" : ",
322 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
323 desc_bytes(desc
), 1);
325 /* shared descriptor for ahash_{final,finup} */
326 desc
= ctx
->sh_desc_fin
;
327 cnstr_shdsc_sk_hash(desc
, &ctx
->adata
, OP_ALG_AS_FINALIZE
,
328 digestsize
, ctx
->ctx_len
);
329 dma_sync_single_for_device(jrdev
, ctx
->sh_desc_fin_dma
,
330 desc_bytes(desc
), ctx
->dir
);
331 print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__
)" : ",
332 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
333 desc_bytes(desc
), 1);
335 /* shared descriptor for first invocation of ahash_update */
336 desc
= ctx
->sh_desc_update_first
;
337 cnstr_shdsc_sk_hash(desc
, &ctx
->adata
, OP_ALG_AS_INIT
, ctx
->ctx_len
,
339 dma_sync_single_for_device(jrdev
, ctx
->sh_desc_update_first_dma
,
340 desc_bytes(desc
), ctx
->dir
);
341 print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__
)
342 " : ", DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
343 desc_bytes(desc
), 1);
345 /* shared descriptor for ahash_digest */
346 desc
= ctx
->sh_desc_digest
;
347 cnstr_shdsc_sk_hash(desc
, &ctx
->adata
, OP_ALG_AS_INITFINAL
,
348 digestsize
, ctx
->ctx_len
);
349 dma_sync_single_for_device(jrdev
, ctx
->sh_desc_digest_dma
,
350 desc_bytes(desc
), ctx
->dir
);
351 print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__
)" : ",
352 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
353 desc_bytes(desc
), 1);
358 /* Digest hash size if it is too large */
359 static int hash_digest_key(struct caam_hash_ctx
*ctx
, u32
*keylen
, u8
*key
,
362 struct device
*jrdev
= ctx
->jrdev
;
364 struct split_key_result result
;
368 desc
= kmalloc(CAAM_CMD_SZ
* 8 + CAAM_PTR_SZ
* 2, GFP_KERNEL
| GFP_DMA
);
370 dev_err(jrdev
, "unable to allocate key input memory\n");
374 init_job_desc(desc
, 0);
376 key_dma
= dma_map_single(jrdev
, key
, *keylen
, DMA_BIDIRECTIONAL
);
377 if (dma_mapping_error(jrdev
, key_dma
)) {
378 dev_err(jrdev
, "unable to map key memory\n");
383 /* Job descriptor to perform unkeyed hash on key_in */
384 append_operation(desc
, ctx
->adata
.algtype
| OP_ALG_ENCRYPT
|
385 OP_ALG_AS_INITFINAL
);
386 append_seq_in_ptr(desc
, key_dma
, *keylen
, 0);
387 append_seq_fifo_load(desc
, *keylen
, FIFOLD_CLASS_CLASS2
|
388 FIFOLD_TYPE_LAST2
| FIFOLD_TYPE_MSG
);
389 append_seq_out_ptr(desc
, key_dma
, digestsize
, 0);
390 append_seq_store(desc
, digestsize
, LDST_CLASS_2_CCB
|
391 LDST_SRCDST_BYTE_CONTEXT
);
393 print_hex_dump_debug("key_in@"__stringify(__LINE__
)": ",
394 DUMP_PREFIX_ADDRESS
, 16, 4, key
, *keylen
, 1);
395 print_hex_dump_debug("jobdesc@"__stringify(__LINE__
)": ",
396 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
),
400 init_completion(&result
.completion
);
402 ret
= caam_jr_enqueue(jrdev
, desc
, split_key_done
, &result
);
403 if (ret
== -EINPROGRESS
) {
405 wait_for_completion(&result
.completion
);
408 print_hex_dump_debug("digested key@"__stringify(__LINE__
)": ",
409 DUMP_PREFIX_ADDRESS
, 16, 4, key
,
412 dma_unmap_single(jrdev
, key_dma
, *keylen
, DMA_BIDIRECTIONAL
);
414 *keylen
= digestsize
;
421 static int ahash_setkey(struct crypto_ahash
*ahash
,
422 const u8
*key
, unsigned int keylen
)
424 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
425 struct device
*jrdev
= ctx
->jrdev
;
426 int blocksize
= crypto_tfm_alg_blocksize(&ahash
->base
);
427 int digestsize
= crypto_ahash_digestsize(ahash
);
428 struct caam_drv_private
*ctrlpriv
= dev_get_drvdata(ctx
->jrdev
->parent
);
430 u8
*hashed_key
= NULL
;
432 dev_dbg(jrdev
, "keylen %d\n", keylen
);
434 if (keylen
> blocksize
) {
435 hashed_key
= kmemdup(key
, keylen
, GFP_KERNEL
| GFP_DMA
);
438 ret
= hash_digest_key(ctx
, &keylen
, hashed_key
, digestsize
);
445 * If DKP is supported, use it in the shared descriptor to generate
448 if (ctrlpriv
->era
>= 6) {
449 ctx
->adata
.key_inline
= true;
450 ctx
->adata
.keylen
= keylen
;
451 ctx
->adata
.keylen_pad
= split_key_len(ctx
->adata
.algtype
&
454 if (ctx
->adata
.keylen_pad
> CAAM_MAX_HASH_KEY_SIZE
)
457 memcpy(ctx
->key
, key
, keylen
);
460 * In case |user key| > |derived key|, using DKP<imm,imm>
461 * would result in invalid opcodes (last bytes of user key) in
462 * the resulting descriptor. Use DKP<ptr,imm> instead => both
463 * virtual and dma key addresses are needed.
465 if (keylen
> ctx
->adata
.keylen_pad
)
466 dma_sync_single_for_device(ctx
->jrdev
,
468 ctx
->adata
.keylen_pad
,
471 ret
= gen_split_key(ctx
->jrdev
, ctx
->key
, &ctx
->adata
, key
,
472 keylen
, CAAM_MAX_HASH_KEY_SIZE
);
478 return ahash_set_sh_desc(ahash
);
484 static int axcbc_setkey(struct crypto_ahash
*ahash
, const u8
*key
,
487 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
488 struct device
*jrdev
= ctx
->jrdev
;
490 if (keylen
!= AES_KEYSIZE_128
)
493 memcpy(ctx
->key
, key
, keylen
);
494 dma_sync_single_for_device(jrdev
, ctx
->adata
.key_dma
, keylen
,
496 ctx
->adata
.keylen
= keylen
;
498 print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__
)" : ",
499 DUMP_PREFIX_ADDRESS
, 16, 4, ctx
->key
, keylen
, 1);
501 return axcbc_set_sh_desc(ahash
);
504 static int acmac_setkey(struct crypto_ahash
*ahash
, const u8
*key
,
507 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
510 err
= aes_check_keylen(keylen
);
514 /* key is immediate data for all cmac shared descriptors */
515 ctx
->adata
.key_virt
= key
;
516 ctx
->adata
.keylen
= keylen
;
518 print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__
)" : ",
519 DUMP_PREFIX_ADDRESS
, 16, 4, key
, keylen
, 1);
521 return acmac_set_sh_desc(ahash
);
525 * ahash_edesc - s/w-extended ahash descriptor
526 * @sec4_sg_dma: physical mapped address of h/w link table
527 * @src_nents: number of segments in input scatterlist
528 * @sec4_sg_bytes: length of dma mapped sec4_sg space
529 * @bklog: stored to determine if the request needs backlog
530 * @hw_desc: the h/w job descriptor followed by any referenced link tables
531 * @sec4_sg: h/w link table
534 dma_addr_t sec4_sg_dma
;
538 u32 hw_desc
[DESC_JOB_IO_LEN_MAX
/ sizeof(u32
)] ____cacheline_aligned
;
539 struct sec4_sg_entry sec4_sg
[];
542 static inline void ahash_unmap(struct device
*dev
,
543 struct ahash_edesc
*edesc
,
544 struct ahash_request
*req
, int dst_len
)
546 struct caam_hash_state
*state
= ahash_request_ctx(req
);
548 if (edesc
->src_nents
)
549 dma_unmap_sg(dev
, req
->src
, edesc
->src_nents
, DMA_TO_DEVICE
);
551 if (edesc
->sec4_sg_bytes
)
552 dma_unmap_single(dev
, edesc
->sec4_sg_dma
,
553 edesc
->sec4_sg_bytes
, DMA_TO_DEVICE
);
555 if (state
->buf_dma
) {
556 dma_unmap_single(dev
, state
->buf_dma
, state
->buflen
,
562 static inline void ahash_unmap_ctx(struct device
*dev
,
563 struct ahash_edesc
*edesc
,
564 struct ahash_request
*req
, int dst_len
, u32 flag
)
566 struct caam_hash_state
*state
= ahash_request_ctx(req
);
568 if (state
->ctx_dma
) {
569 dma_unmap_single(dev
, state
->ctx_dma
, state
->ctx_dma_len
, flag
);
572 ahash_unmap(dev
, edesc
, req
, dst_len
);
575 static inline void ahash_done_cpy(struct device
*jrdev
, u32
*desc
, u32 err
,
576 void *context
, enum dma_data_direction dir
)
578 struct ahash_request
*req
= context
;
579 struct caam_drv_private_jr
*jrp
= dev_get_drvdata(jrdev
);
580 struct ahash_edesc
*edesc
;
581 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
582 int digestsize
= crypto_ahash_digestsize(ahash
);
583 struct caam_hash_state
*state
= ahash_request_ctx(req
);
584 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
588 dev_dbg(jrdev
, "%s %d: err 0x%x\n", __func__
, __LINE__
, err
);
590 edesc
= state
->edesc
;
591 has_bklog
= edesc
->bklog
;
594 ecode
= caam_jr_strstatus(jrdev
, err
);
596 ahash_unmap_ctx(jrdev
, edesc
, req
, digestsize
, dir
);
597 memcpy(req
->result
, state
->caam_ctx
, digestsize
);
600 print_hex_dump_debug("ctx@"__stringify(__LINE__
)": ",
601 DUMP_PREFIX_ADDRESS
, 16, 4, state
->caam_ctx
,
605 * If no backlog flag, the completion of the request is done
606 * by CAAM, not crypto engine.
609 req
->base
.complete(&req
->base
, ecode
);
611 crypto_finalize_hash_request(jrp
->engine
, req
, ecode
);
614 static void ahash_done(struct device
*jrdev
, u32
*desc
, u32 err
,
617 ahash_done_cpy(jrdev
, desc
, err
, context
, DMA_FROM_DEVICE
);
620 static void ahash_done_ctx_src(struct device
*jrdev
, u32
*desc
, u32 err
,
623 ahash_done_cpy(jrdev
, desc
, err
, context
, DMA_BIDIRECTIONAL
);
626 static inline void ahash_done_switch(struct device
*jrdev
, u32
*desc
, u32 err
,
627 void *context
, enum dma_data_direction dir
)
629 struct ahash_request
*req
= context
;
630 struct caam_drv_private_jr
*jrp
= dev_get_drvdata(jrdev
);
631 struct ahash_edesc
*edesc
;
632 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
633 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
634 struct caam_hash_state
*state
= ahash_request_ctx(req
);
635 int digestsize
= crypto_ahash_digestsize(ahash
);
639 dev_dbg(jrdev
, "%s %d: err 0x%x\n", __func__
, __LINE__
, err
);
641 edesc
= state
->edesc
;
642 has_bklog
= edesc
->bklog
;
644 ecode
= caam_jr_strstatus(jrdev
, err
);
646 ahash_unmap_ctx(jrdev
, edesc
, req
, ctx
->ctx_len
, dir
);
649 scatterwalk_map_and_copy(state
->buf
, req
->src
,
650 req
->nbytes
- state
->next_buflen
,
651 state
->next_buflen
, 0);
652 state
->buflen
= state
->next_buflen
;
654 print_hex_dump_debug("buf@" __stringify(__LINE__
)": ",
655 DUMP_PREFIX_ADDRESS
, 16, 4, state
->buf
,
658 print_hex_dump_debug("ctx@"__stringify(__LINE__
)": ",
659 DUMP_PREFIX_ADDRESS
, 16, 4, state
->caam_ctx
,
662 print_hex_dump_debug("result@"__stringify(__LINE__
)": ",
663 DUMP_PREFIX_ADDRESS
, 16, 4, req
->result
,
667 * If no backlog flag, the completion of the request is done
668 * by CAAM, not crypto engine.
671 req
->base
.complete(&req
->base
, ecode
);
673 crypto_finalize_hash_request(jrp
->engine
, req
, ecode
);
677 static void ahash_done_bi(struct device
*jrdev
, u32
*desc
, u32 err
,
680 ahash_done_switch(jrdev
, desc
, err
, context
, DMA_BIDIRECTIONAL
);
683 static void ahash_done_ctx_dst(struct device
*jrdev
, u32
*desc
, u32 err
,
686 ahash_done_switch(jrdev
, desc
, err
, context
, DMA_FROM_DEVICE
);
690 * Allocate an enhanced descriptor, which contains the hardware descriptor
691 * and space for hardware scatter table containing sg_num entries.
693 static struct ahash_edesc
*ahash_edesc_alloc(struct ahash_request
*req
,
694 int sg_num
, u32
*sh_desc
,
695 dma_addr_t sh_desc_dma
)
697 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
698 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
699 struct caam_hash_state
*state
= ahash_request_ctx(req
);
700 gfp_t flags
= (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
701 GFP_KERNEL
: GFP_ATOMIC
;
702 struct ahash_edesc
*edesc
;
703 unsigned int sg_size
= sg_num
* sizeof(struct sec4_sg_entry
);
705 edesc
= kzalloc(sizeof(*edesc
) + sg_size
, GFP_DMA
| flags
);
707 dev_err(ctx
->jrdev
, "could not allocate extended descriptor\n");
711 state
->edesc
= edesc
;
713 init_job_desc_shared(edesc
->hw_desc
, sh_desc_dma
, desc_len(sh_desc
),
714 HDR_SHARE_DEFER
| HDR_REVERSE
);
719 static int ahash_edesc_add_src(struct caam_hash_ctx
*ctx
,
720 struct ahash_edesc
*edesc
,
721 struct ahash_request
*req
, int nents
,
722 unsigned int first_sg
,
723 unsigned int first_bytes
, size_t to_hash
)
728 if (nents
> 1 || first_sg
) {
729 struct sec4_sg_entry
*sg
= edesc
->sec4_sg
;
730 unsigned int sgsize
= sizeof(*sg
) *
731 pad_sg_nents(first_sg
+ nents
);
733 sg_to_sec4_sg_last(req
->src
, to_hash
, sg
+ first_sg
, 0);
735 src_dma
= dma_map_single(ctx
->jrdev
, sg
, sgsize
, DMA_TO_DEVICE
);
736 if (dma_mapping_error(ctx
->jrdev
, src_dma
)) {
737 dev_err(ctx
->jrdev
, "unable to map S/G table\n");
741 edesc
->sec4_sg_bytes
= sgsize
;
742 edesc
->sec4_sg_dma
= src_dma
;
745 src_dma
= sg_dma_address(req
->src
);
749 append_seq_in_ptr(edesc
->hw_desc
, src_dma
, first_bytes
+ to_hash
,
755 static int ahash_do_one_req(struct crypto_engine
*engine
, void *areq
)
757 struct ahash_request
*req
= ahash_request_cast(areq
);
758 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(crypto_ahash_reqtfm(req
));
759 struct caam_hash_state
*state
= ahash_request_ctx(req
);
760 struct device
*jrdev
= ctx
->jrdev
;
761 u32
*desc
= state
->edesc
->hw_desc
;
764 state
->edesc
->bklog
= true;
766 ret
= caam_jr_enqueue(jrdev
, desc
, state
->ahash_op_done
, req
);
768 if (ret
!= -EINPROGRESS
) {
769 ahash_unmap(jrdev
, state
->edesc
, req
, 0);
778 static int ahash_enqueue_req(struct device
*jrdev
,
779 void (*cbk
)(struct device
*jrdev
, u32
*desc
,
780 u32 err
, void *context
),
781 struct ahash_request
*req
,
782 int dst_len
, enum dma_data_direction dir
)
784 struct caam_drv_private_jr
*jrpriv
= dev_get_drvdata(jrdev
);
785 struct caam_hash_state
*state
= ahash_request_ctx(req
);
786 struct ahash_edesc
*edesc
= state
->edesc
;
787 u32
*desc
= edesc
->hw_desc
;
790 state
->ahash_op_done
= cbk
;
793 * Only the backlog request are sent to crypto-engine since the others
794 * can be handled by CAAM, if free, especially since JR has up to 1024
795 * entries (more than the 10 entries from crypto-engine).
797 if (req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
)
798 ret
= crypto_transfer_hash_request_to_engine(jrpriv
->engine
,
801 ret
= caam_jr_enqueue(jrdev
, desc
, cbk
, req
);
803 if ((ret
!= -EINPROGRESS
) && (ret
!= -EBUSY
)) {
804 ahash_unmap_ctx(jrdev
, edesc
, req
, dst_len
, dir
);
811 /* submit update job descriptor */
812 static int ahash_update_ctx(struct ahash_request
*req
)
814 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
815 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
816 struct caam_hash_state
*state
= ahash_request_ctx(req
);
817 struct device
*jrdev
= ctx
->jrdev
;
818 u8
*buf
= state
->buf
;
819 int *buflen
= &state
->buflen
;
820 int *next_buflen
= &state
->next_buflen
;
821 int blocksize
= crypto_ahash_blocksize(ahash
);
822 int in_len
= *buflen
+ req
->nbytes
, to_hash
;
824 int src_nents
, mapped_nents
, sec4_sg_bytes
, sec4_sg_src_index
;
825 struct ahash_edesc
*edesc
;
828 *next_buflen
= in_len
& (blocksize
- 1);
829 to_hash
= in_len
- *next_buflen
;
832 * For XCBC and CMAC, if to_hash is multiple of block size,
833 * keep last block in internal buffer
835 if ((is_xcbc_aes(ctx
->adata
.algtype
) ||
836 is_cmac_aes(ctx
->adata
.algtype
)) && to_hash
>= blocksize
&&
837 (*next_buflen
== 0)) {
838 *next_buflen
= blocksize
;
839 to_hash
-= blocksize
;
844 int src_len
= req
->nbytes
- *next_buflen
;
846 src_nents
= sg_nents_for_len(req
->src
, src_len
);
848 dev_err(jrdev
, "Invalid number of src SG.\n");
853 mapped_nents
= dma_map_sg(jrdev
, req
->src
, src_nents
,
856 dev_err(jrdev
, "unable to DMA map source\n");
863 sec4_sg_src_index
= 1 + (*buflen
? 1 : 0);
864 pad_nents
= pad_sg_nents(sec4_sg_src_index
+ mapped_nents
);
865 sec4_sg_bytes
= pad_nents
* sizeof(struct sec4_sg_entry
);
868 * allocate space for base edesc and hw desc commands,
871 edesc
= ahash_edesc_alloc(req
, pad_nents
, ctx
->sh_desc_update
,
872 ctx
->sh_desc_update_dma
);
874 dma_unmap_sg(jrdev
, req
->src
, src_nents
, DMA_TO_DEVICE
);
878 edesc
->src_nents
= src_nents
;
879 edesc
->sec4_sg_bytes
= sec4_sg_bytes
;
881 ret
= ctx_map_to_sec4_sg(jrdev
, state
, ctx
->ctx_len
,
882 edesc
->sec4_sg
, DMA_BIDIRECTIONAL
);
886 ret
= buf_map_to_sec4_sg(jrdev
, edesc
->sec4_sg
+ 1, state
);
891 sg_to_sec4_sg_last(req
->src
, src_len
,
892 edesc
->sec4_sg
+ sec4_sg_src_index
,
895 sg_to_sec4_set_last(edesc
->sec4_sg
+ sec4_sg_src_index
-
898 desc
= edesc
->hw_desc
;
900 edesc
->sec4_sg_dma
= dma_map_single(jrdev
, edesc
->sec4_sg
,
903 if (dma_mapping_error(jrdev
, edesc
->sec4_sg_dma
)) {
904 dev_err(jrdev
, "unable to map S/G table\n");
909 append_seq_in_ptr(desc
, edesc
->sec4_sg_dma
, ctx
->ctx_len
+
912 append_seq_out_ptr(desc
, state
->ctx_dma
, ctx
->ctx_len
, 0);
914 print_hex_dump_debug("jobdesc@"__stringify(__LINE__
)": ",
915 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
916 desc_bytes(desc
), 1);
918 ret
= ahash_enqueue_req(jrdev
, ahash_done_bi
, req
,
919 ctx
->ctx_len
, DMA_BIDIRECTIONAL
);
920 } else if (*next_buflen
) {
921 scatterwalk_map_and_copy(buf
+ *buflen
, req
->src
, 0,
923 *buflen
= *next_buflen
;
925 print_hex_dump_debug("buf@" __stringify(__LINE__
)": ",
926 DUMP_PREFIX_ADDRESS
, 16, 4, buf
,
932 ahash_unmap_ctx(jrdev
, edesc
, req
, ctx
->ctx_len
, DMA_BIDIRECTIONAL
);
937 static int ahash_final_ctx(struct ahash_request
*req
)
939 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
940 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
941 struct caam_hash_state
*state
= ahash_request_ctx(req
);
942 struct device
*jrdev
= ctx
->jrdev
;
943 int buflen
= state
->buflen
;
946 int digestsize
= crypto_ahash_digestsize(ahash
);
947 struct ahash_edesc
*edesc
;
950 sec4_sg_bytes
= pad_sg_nents(1 + (buflen
? 1 : 0)) *
951 sizeof(struct sec4_sg_entry
);
953 /* allocate space for base edesc and hw desc commands, link tables */
954 edesc
= ahash_edesc_alloc(req
, 4, ctx
->sh_desc_fin
,
955 ctx
->sh_desc_fin_dma
);
959 desc
= edesc
->hw_desc
;
961 edesc
->sec4_sg_bytes
= sec4_sg_bytes
;
963 ret
= ctx_map_to_sec4_sg(jrdev
, state
, ctx
->ctx_len
,
964 edesc
->sec4_sg
, DMA_BIDIRECTIONAL
);
968 ret
= buf_map_to_sec4_sg(jrdev
, edesc
->sec4_sg
+ 1, state
);
972 sg_to_sec4_set_last(edesc
->sec4_sg
+ (buflen
? 1 : 0));
974 edesc
->sec4_sg_dma
= dma_map_single(jrdev
, edesc
->sec4_sg
,
975 sec4_sg_bytes
, DMA_TO_DEVICE
);
976 if (dma_mapping_error(jrdev
, edesc
->sec4_sg_dma
)) {
977 dev_err(jrdev
, "unable to map S/G table\n");
982 append_seq_in_ptr(desc
, edesc
->sec4_sg_dma
, ctx
->ctx_len
+ buflen
,
984 append_seq_out_ptr(desc
, state
->ctx_dma
, digestsize
, 0);
986 print_hex_dump_debug("jobdesc@"__stringify(__LINE__
)": ",
987 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
),
990 return ahash_enqueue_req(jrdev
, ahash_done_ctx_src
, req
,
991 digestsize
, DMA_BIDIRECTIONAL
);
993 ahash_unmap_ctx(jrdev
, edesc
, req
, digestsize
, DMA_BIDIRECTIONAL
);
998 static int ahash_finup_ctx(struct ahash_request
*req
)
1000 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1001 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1002 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1003 struct device
*jrdev
= ctx
->jrdev
;
1004 int buflen
= state
->buflen
;
1006 int sec4_sg_src_index
;
1007 int src_nents
, mapped_nents
;
1008 int digestsize
= crypto_ahash_digestsize(ahash
);
1009 struct ahash_edesc
*edesc
;
1012 src_nents
= sg_nents_for_len(req
->src
, req
->nbytes
);
1013 if (src_nents
< 0) {
1014 dev_err(jrdev
, "Invalid number of src SG.\n");
1019 mapped_nents
= dma_map_sg(jrdev
, req
->src
, src_nents
,
1021 if (!mapped_nents
) {
1022 dev_err(jrdev
, "unable to DMA map source\n");
1029 sec4_sg_src_index
= 1 + (buflen
? 1 : 0);
1031 /* allocate space for base edesc and hw desc commands, link tables */
1032 edesc
= ahash_edesc_alloc(req
, sec4_sg_src_index
+ mapped_nents
,
1033 ctx
->sh_desc_fin
, ctx
->sh_desc_fin_dma
);
1035 dma_unmap_sg(jrdev
, req
->src
, src_nents
, DMA_TO_DEVICE
);
1039 desc
= edesc
->hw_desc
;
1041 edesc
->src_nents
= src_nents
;
1043 ret
= ctx_map_to_sec4_sg(jrdev
, state
, ctx
->ctx_len
,
1044 edesc
->sec4_sg
, DMA_BIDIRECTIONAL
);
1048 ret
= buf_map_to_sec4_sg(jrdev
, edesc
->sec4_sg
+ 1, state
);
1052 ret
= ahash_edesc_add_src(ctx
, edesc
, req
, mapped_nents
,
1053 sec4_sg_src_index
, ctx
->ctx_len
+ buflen
,
1058 append_seq_out_ptr(desc
, state
->ctx_dma
, digestsize
, 0);
1060 print_hex_dump_debug("jobdesc@"__stringify(__LINE__
)": ",
1061 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
),
1064 return ahash_enqueue_req(jrdev
, ahash_done_ctx_src
, req
,
1065 digestsize
, DMA_BIDIRECTIONAL
);
1067 ahash_unmap_ctx(jrdev
, edesc
, req
, digestsize
, DMA_BIDIRECTIONAL
);
1072 static int ahash_digest(struct ahash_request
*req
)
1074 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1075 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1076 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1077 struct device
*jrdev
= ctx
->jrdev
;
1079 int digestsize
= crypto_ahash_digestsize(ahash
);
1080 int src_nents
, mapped_nents
;
1081 struct ahash_edesc
*edesc
;
1086 src_nents
= sg_nents_for_len(req
->src
, req
->nbytes
);
1087 if (src_nents
< 0) {
1088 dev_err(jrdev
, "Invalid number of src SG.\n");
1093 mapped_nents
= dma_map_sg(jrdev
, req
->src
, src_nents
,
1095 if (!mapped_nents
) {
1096 dev_err(jrdev
, "unable to map source for DMA\n");
1103 /* allocate space for base edesc and hw desc commands, link tables */
1104 edesc
= ahash_edesc_alloc(req
, mapped_nents
> 1 ? mapped_nents
: 0,
1105 ctx
->sh_desc_digest
, ctx
->sh_desc_digest_dma
);
1107 dma_unmap_sg(jrdev
, req
->src
, src_nents
, DMA_TO_DEVICE
);
1111 edesc
->src_nents
= src_nents
;
1113 ret
= ahash_edesc_add_src(ctx
, edesc
, req
, mapped_nents
, 0, 0,
1116 ahash_unmap(jrdev
, edesc
, req
, digestsize
);
1121 desc
= edesc
->hw_desc
;
1123 ret
= map_seq_out_ptr_ctx(desc
, jrdev
, state
, digestsize
);
1125 ahash_unmap(jrdev
, edesc
, req
, digestsize
);
1130 print_hex_dump_debug("jobdesc@"__stringify(__LINE__
)": ",
1131 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
),
1134 return ahash_enqueue_req(jrdev
, ahash_done
, req
, digestsize
,
1138 /* submit ahash final if it the first job descriptor */
1139 static int ahash_final_no_ctx(struct ahash_request
*req
)
1141 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1142 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1143 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1144 struct device
*jrdev
= ctx
->jrdev
;
1145 u8
*buf
= state
->buf
;
1146 int buflen
= state
->buflen
;
1148 int digestsize
= crypto_ahash_digestsize(ahash
);
1149 struct ahash_edesc
*edesc
;
1152 /* allocate space for base edesc and hw desc commands, link tables */
1153 edesc
= ahash_edesc_alloc(req
, 0, ctx
->sh_desc_digest
,
1154 ctx
->sh_desc_digest_dma
);
1158 desc
= edesc
->hw_desc
;
1161 state
->buf_dma
= dma_map_single(jrdev
, buf
, buflen
,
1163 if (dma_mapping_error(jrdev
, state
->buf_dma
)) {
1164 dev_err(jrdev
, "unable to map src\n");
1168 append_seq_in_ptr(desc
, state
->buf_dma
, buflen
, 0);
1171 ret
= map_seq_out_ptr_ctx(desc
, jrdev
, state
, digestsize
);
1175 print_hex_dump_debug("jobdesc@"__stringify(__LINE__
)": ",
1176 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
),
1179 return ahash_enqueue_req(jrdev
, ahash_done
, req
,
1180 digestsize
, DMA_FROM_DEVICE
);
1182 ahash_unmap(jrdev
, edesc
, req
, digestsize
);
1187 /* submit ahash update if it the first job descriptor after update */
1188 static int ahash_update_no_ctx(struct ahash_request
*req
)
1190 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1191 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1192 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1193 struct device
*jrdev
= ctx
->jrdev
;
1194 u8
*buf
= state
->buf
;
1195 int *buflen
= &state
->buflen
;
1196 int *next_buflen
= &state
->next_buflen
;
1197 int blocksize
= crypto_ahash_blocksize(ahash
);
1198 int in_len
= *buflen
+ req
->nbytes
, to_hash
;
1199 int sec4_sg_bytes
, src_nents
, mapped_nents
;
1200 struct ahash_edesc
*edesc
;
1204 *next_buflen
= in_len
& (blocksize
- 1);
1205 to_hash
= in_len
- *next_buflen
;
1208 * For XCBC and CMAC, if to_hash is multiple of block size,
1209 * keep last block in internal buffer
1211 if ((is_xcbc_aes(ctx
->adata
.algtype
) ||
1212 is_cmac_aes(ctx
->adata
.algtype
)) && to_hash
>= blocksize
&&
1213 (*next_buflen
== 0)) {
1214 *next_buflen
= blocksize
;
1215 to_hash
-= blocksize
;
1220 int src_len
= req
->nbytes
- *next_buflen
;
1222 src_nents
= sg_nents_for_len(req
->src
, src_len
);
1223 if (src_nents
< 0) {
1224 dev_err(jrdev
, "Invalid number of src SG.\n");
1229 mapped_nents
= dma_map_sg(jrdev
, req
->src
, src_nents
,
1231 if (!mapped_nents
) {
1232 dev_err(jrdev
, "unable to DMA map source\n");
1239 pad_nents
= pad_sg_nents(1 + mapped_nents
);
1240 sec4_sg_bytes
= pad_nents
* sizeof(struct sec4_sg_entry
);
1243 * allocate space for base edesc and hw desc commands,
1246 edesc
= ahash_edesc_alloc(req
, pad_nents
,
1247 ctx
->sh_desc_update_first
,
1248 ctx
->sh_desc_update_first_dma
);
1250 dma_unmap_sg(jrdev
, req
->src
, src_nents
, DMA_TO_DEVICE
);
1254 edesc
->src_nents
= src_nents
;
1255 edesc
->sec4_sg_bytes
= sec4_sg_bytes
;
1257 ret
= buf_map_to_sec4_sg(jrdev
, edesc
->sec4_sg
, state
);
1261 sg_to_sec4_sg_last(req
->src
, src_len
, edesc
->sec4_sg
+ 1, 0);
1263 desc
= edesc
->hw_desc
;
1265 edesc
->sec4_sg_dma
= dma_map_single(jrdev
, edesc
->sec4_sg
,
1268 if (dma_mapping_error(jrdev
, edesc
->sec4_sg_dma
)) {
1269 dev_err(jrdev
, "unable to map S/G table\n");
1274 append_seq_in_ptr(desc
, edesc
->sec4_sg_dma
, to_hash
, LDST_SGF
);
1276 ret
= map_seq_out_ptr_ctx(desc
, jrdev
, state
, ctx
->ctx_len
);
1280 print_hex_dump_debug("jobdesc@"__stringify(__LINE__
)": ",
1281 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
1282 desc_bytes(desc
), 1);
1284 ret
= ahash_enqueue_req(jrdev
, ahash_done_ctx_dst
, req
,
1285 ctx
->ctx_len
, DMA_TO_DEVICE
);
1286 if ((ret
!= -EINPROGRESS
) && (ret
!= -EBUSY
))
1288 state
->update
= ahash_update_ctx
;
1289 state
->finup
= ahash_finup_ctx
;
1290 state
->final
= ahash_final_ctx
;
1291 } else if (*next_buflen
) {
1292 scatterwalk_map_and_copy(buf
+ *buflen
, req
->src
, 0,
1294 *buflen
= *next_buflen
;
1296 print_hex_dump_debug("buf@" __stringify(__LINE__
)": ",
1297 DUMP_PREFIX_ADDRESS
, 16, 4, buf
,
1303 ahash_unmap_ctx(jrdev
, edesc
, req
, ctx
->ctx_len
, DMA_TO_DEVICE
);
1308 /* submit ahash finup if it the first job descriptor after update */
1309 static int ahash_finup_no_ctx(struct ahash_request
*req
)
1311 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1312 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1313 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1314 struct device
*jrdev
= ctx
->jrdev
;
1315 int buflen
= state
->buflen
;
1317 int sec4_sg_bytes
, sec4_sg_src_index
, src_nents
, mapped_nents
;
1318 int digestsize
= crypto_ahash_digestsize(ahash
);
1319 struct ahash_edesc
*edesc
;
1322 src_nents
= sg_nents_for_len(req
->src
, req
->nbytes
);
1323 if (src_nents
< 0) {
1324 dev_err(jrdev
, "Invalid number of src SG.\n");
1329 mapped_nents
= dma_map_sg(jrdev
, req
->src
, src_nents
,
1331 if (!mapped_nents
) {
1332 dev_err(jrdev
, "unable to DMA map source\n");
1339 sec4_sg_src_index
= 2;
1340 sec4_sg_bytes
= (sec4_sg_src_index
+ mapped_nents
) *
1341 sizeof(struct sec4_sg_entry
);
1343 /* allocate space for base edesc and hw desc commands, link tables */
1344 edesc
= ahash_edesc_alloc(req
, sec4_sg_src_index
+ mapped_nents
,
1345 ctx
->sh_desc_digest
, ctx
->sh_desc_digest_dma
);
1347 dma_unmap_sg(jrdev
, req
->src
, src_nents
, DMA_TO_DEVICE
);
1351 desc
= edesc
->hw_desc
;
1353 edesc
->src_nents
= src_nents
;
1354 edesc
->sec4_sg_bytes
= sec4_sg_bytes
;
1356 ret
= buf_map_to_sec4_sg(jrdev
, edesc
->sec4_sg
, state
);
1360 ret
= ahash_edesc_add_src(ctx
, edesc
, req
, mapped_nents
, 1, buflen
,
1363 dev_err(jrdev
, "unable to map S/G table\n");
1367 ret
= map_seq_out_ptr_ctx(desc
, jrdev
, state
, digestsize
);
1371 print_hex_dump_debug("jobdesc@"__stringify(__LINE__
)": ",
1372 DUMP_PREFIX_ADDRESS
, 16, 4, desc
, desc_bytes(desc
),
1375 return ahash_enqueue_req(jrdev
, ahash_done
, req
,
1376 digestsize
, DMA_FROM_DEVICE
);
1378 ahash_unmap(jrdev
, edesc
, req
, digestsize
);
1384 /* submit first update job descriptor after init */
1385 static int ahash_update_first(struct ahash_request
*req
)
1387 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1388 struct caam_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1389 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1390 struct device
*jrdev
= ctx
->jrdev
;
1391 u8
*buf
= state
->buf
;
1392 int *buflen
= &state
->buflen
;
1393 int *next_buflen
= &state
->next_buflen
;
1395 int blocksize
= crypto_ahash_blocksize(ahash
);
1397 int src_nents
, mapped_nents
;
1398 struct ahash_edesc
*edesc
;
1401 *next_buflen
= req
->nbytes
& (blocksize
- 1);
1402 to_hash
= req
->nbytes
- *next_buflen
;
1405 * For XCBC and CMAC, if to_hash is multiple of block size,
1406 * keep last block in internal buffer
1408 if ((is_xcbc_aes(ctx
->adata
.algtype
) ||
1409 is_cmac_aes(ctx
->adata
.algtype
)) && to_hash
>= blocksize
&&
1410 (*next_buflen
== 0)) {
1411 *next_buflen
= blocksize
;
1412 to_hash
-= blocksize
;
1416 src_nents
= sg_nents_for_len(req
->src
,
1417 req
->nbytes
- *next_buflen
);
1418 if (src_nents
< 0) {
1419 dev_err(jrdev
, "Invalid number of src SG.\n");
1424 mapped_nents
= dma_map_sg(jrdev
, req
->src
, src_nents
,
1426 if (!mapped_nents
) {
1427 dev_err(jrdev
, "unable to map source for DMA\n");
1435 * allocate space for base edesc and hw desc commands,
1438 edesc
= ahash_edesc_alloc(req
, mapped_nents
> 1 ?
1440 ctx
->sh_desc_update_first
,
1441 ctx
->sh_desc_update_first_dma
);
1443 dma_unmap_sg(jrdev
, req
->src
, src_nents
, DMA_TO_DEVICE
);
1447 edesc
->src_nents
= src_nents
;
1449 ret
= ahash_edesc_add_src(ctx
, edesc
, req
, mapped_nents
, 0, 0,
1454 desc
= edesc
->hw_desc
;
1456 ret
= map_seq_out_ptr_ctx(desc
, jrdev
, state
, ctx
->ctx_len
);
1460 print_hex_dump_debug("jobdesc@"__stringify(__LINE__
)": ",
1461 DUMP_PREFIX_ADDRESS
, 16, 4, desc
,
1462 desc_bytes(desc
), 1);
1464 ret
= ahash_enqueue_req(jrdev
, ahash_done_ctx_dst
, req
,
1465 ctx
->ctx_len
, DMA_TO_DEVICE
);
1466 if ((ret
!= -EINPROGRESS
) && (ret
!= -EBUSY
))
1468 state
->update
= ahash_update_ctx
;
1469 state
->finup
= ahash_finup_ctx
;
1470 state
->final
= ahash_final_ctx
;
1471 } else if (*next_buflen
) {
1472 state
->update
= ahash_update_no_ctx
;
1473 state
->finup
= ahash_finup_no_ctx
;
1474 state
->final
= ahash_final_no_ctx
;
1475 scatterwalk_map_and_copy(buf
, req
->src
, 0,
1477 *buflen
= *next_buflen
;
1479 print_hex_dump_debug("buf@" __stringify(__LINE__
)": ",
1480 DUMP_PREFIX_ADDRESS
, 16, 4, buf
,
1486 ahash_unmap_ctx(jrdev
, edesc
, req
, ctx
->ctx_len
, DMA_TO_DEVICE
);
/* finup with no prior update: equivalent to a one-shot digest */
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
1496 static int ahash_init(struct ahash_request
*req
)
1498 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1500 state
->update
= ahash_update_first
;
1501 state
->finup
= ahash_finup_first
;
1502 state
->final
= ahash_final_no_ctx
;
1505 state
->ctx_dma_len
= 0;
1508 state
->next_buflen
= 0;
1513 static int ahash_update(struct ahash_request
*req
)
1515 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1517 return state
->update(req
);
1520 static int ahash_finup(struct ahash_request
*req
)
1522 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1524 return state
->finup(req
);
1527 static int ahash_final(struct ahash_request
*req
)
1529 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1531 return state
->final(req
);
1534 static int ahash_export(struct ahash_request
*req
, void *out
)
1536 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1537 struct caam_export_state
*export
= out
;
1538 u8
*buf
= state
->buf
;
1539 int len
= state
->buflen
;
1541 memcpy(export
->buf
, buf
, len
);
1542 memcpy(export
->caam_ctx
, state
->caam_ctx
, sizeof(export
->caam_ctx
));
1543 export
->buflen
= len
;
1544 export
->update
= state
->update
;
1545 export
->final
= state
->final
;
1546 export
->finup
= state
->finup
;
1551 static int ahash_import(struct ahash_request
*req
, const void *in
)
1553 struct caam_hash_state
*state
= ahash_request_ctx(req
);
1554 const struct caam_export_state
*export
= in
;
1556 memset(state
, 0, sizeof(*state
));
1557 memcpy(state
->buf
, export
->buf
, export
->buflen
);
1558 memcpy(state
->caam_ctx
, export
->caam_ctx
, sizeof(state
->caam_ctx
));
1559 state
->buflen
= export
->buflen
;
1560 state
->update
= export
->update
;
1561 state
->final
= export
->final
;
1562 state
->finup
= export
->finup
;
1567 struct caam_hash_template
{
1568 char name
[CRYPTO_MAX_ALG_NAME
];
1569 char driver_name
[CRYPTO_MAX_ALG_NAME
];
1570 char hmac_name
[CRYPTO_MAX_ALG_NAME
];
1571 char hmac_driver_name
[CRYPTO_MAX_ALG_NAME
];
1572 unsigned int blocksize
;
1573 struct ahash_alg template_ahash
;
1577 /* ahash descriptors */
1578 static struct caam_hash_template driver_hash
[] = {
1581 .driver_name
= "sha1-caam",
1582 .hmac_name
= "hmac(sha1)",
1583 .hmac_driver_name
= "hmac-sha1-caam",
1584 .blocksize
= SHA1_BLOCK_SIZE
,
1587 .update
= ahash_update
,
1588 .final
= ahash_final
,
1589 .finup
= ahash_finup
,
1590 .digest
= ahash_digest
,
1591 .export
= ahash_export
,
1592 .import
= ahash_import
,
1593 .setkey
= ahash_setkey
,
1595 .digestsize
= SHA1_DIGEST_SIZE
,
1596 .statesize
= sizeof(struct caam_export_state
),
1599 .alg_type
= OP_ALG_ALGSEL_SHA1
,
1602 .driver_name
= "sha224-caam",
1603 .hmac_name
= "hmac(sha224)",
1604 .hmac_driver_name
= "hmac-sha224-caam",
1605 .blocksize
= SHA224_BLOCK_SIZE
,
1608 .update
= ahash_update
,
1609 .final
= ahash_final
,
1610 .finup
= ahash_finup
,
1611 .digest
= ahash_digest
,
1612 .export
= ahash_export
,
1613 .import
= ahash_import
,
1614 .setkey
= ahash_setkey
,
1616 .digestsize
= SHA224_DIGEST_SIZE
,
1617 .statesize
= sizeof(struct caam_export_state
),
1620 .alg_type
= OP_ALG_ALGSEL_SHA224
,
1623 .driver_name
= "sha256-caam",
1624 .hmac_name
= "hmac(sha256)",
1625 .hmac_driver_name
= "hmac-sha256-caam",
1626 .blocksize
= SHA256_BLOCK_SIZE
,
1629 .update
= ahash_update
,
1630 .final
= ahash_final
,
1631 .finup
= ahash_finup
,
1632 .digest
= ahash_digest
,
1633 .export
= ahash_export
,
1634 .import
= ahash_import
,
1635 .setkey
= ahash_setkey
,
1637 .digestsize
= SHA256_DIGEST_SIZE
,
1638 .statesize
= sizeof(struct caam_export_state
),
1641 .alg_type
= OP_ALG_ALGSEL_SHA256
,
1644 .driver_name
= "sha384-caam",
1645 .hmac_name
= "hmac(sha384)",
1646 .hmac_driver_name
= "hmac-sha384-caam",
1647 .blocksize
= SHA384_BLOCK_SIZE
,
1650 .update
= ahash_update
,
1651 .final
= ahash_final
,
1652 .finup
= ahash_finup
,
1653 .digest
= ahash_digest
,
1654 .export
= ahash_export
,
1655 .import
= ahash_import
,
1656 .setkey
= ahash_setkey
,
1658 .digestsize
= SHA384_DIGEST_SIZE
,
1659 .statesize
= sizeof(struct caam_export_state
),
1662 .alg_type
= OP_ALG_ALGSEL_SHA384
,
1665 .driver_name
= "sha512-caam",
1666 .hmac_name
= "hmac(sha512)",
1667 .hmac_driver_name
= "hmac-sha512-caam",
1668 .blocksize
= SHA512_BLOCK_SIZE
,
1671 .update
= ahash_update
,
1672 .final
= ahash_final
,
1673 .finup
= ahash_finup
,
1674 .digest
= ahash_digest
,
1675 .export
= ahash_export
,
1676 .import
= ahash_import
,
1677 .setkey
= ahash_setkey
,
1679 .digestsize
= SHA512_DIGEST_SIZE
,
1680 .statesize
= sizeof(struct caam_export_state
),
1683 .alg_type
= OP_ALG_ALGSEL_SHA512
,
1686 .driver_name
= "md5-caam",
1687 .hmac_name
= "hmac(md5)",
1688 .hmac_driver_name
= "hmac-md5-caam",
1689 .blocksize
= MD5_BLOCK_WORDS
* 4,
1692 .update
= ahash_update
,
1693 .final
= ahash_final
,
1694 .finup
= ahash_finup
,
1695 .digest
= ahash_digest
,
1696 .export
= ahash_export
,
1697 .import
= ahash_import
,
1698 .setkey
= ahash_setkey
,
1700 .digestsize
= MD5_DIGEST_SIZE
,
1701 .statesize
= sizeof(struct caam_export_state
),
1704 .alg_type
= OP_ALG_ALGSEL_MD5
,
1706 .hmac_name
= "xcbc(aes)",
1707 .hmac_driver_name
= "xcbc-aes-caam",
1708 .blocksize
= AES_BLOCK_SIZE
,
1711 .update
= ahash_update
,
1712 .final
= ahash_final
,
1713 .finup
= ahash_finup
,
1714 .digest
= ahash_digest
,
1715 .export
= ahash_export
,
1716 .import
= ahash_import
,
1717 .setkey
= axcbc_setkey
,
1719 .digestsize
= AES_BLOCK_SIZE
,
1720 .statesize
= sizeof(struct caam_export_state
),
1723 .alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_XCBC_MAC
,
1725 .hmac_name
= "cmac(aes)",
1726 .hmac_driver_name
= "cmac-aes-caam",
1727 .blocksize
= AES_BLOCK_SIZE
,
1730 .update
= ahash_update
,
1731 .final
= ahash_final
,
1732 .finup
= ahash_finup
,
1733 .digest
= ahash_digest
,
1734 .export
= ahash_export
,
1735 .import
= ahash_import
,
1736 .setkey
= acmac_setkey
,
1738 .digestsize
= AES_BLOCK_SIZE
,
1739 .statesize
= sizeof(struct caam_export_state
),
1742 .alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CMAC
,
1746 struct caam_hash_alg
{
1747 struct list_head entry
;
1749 struct ahash_alg ahash_alg
;
1752 static int caam_hash_cra_init(struct crypto_tfm
*tfm
)
1754 struct crypto_ahash
*ahash
= __crypto_ahash_cast(tfm
);
1755 struct crypto_alg
*base
= tfm
->__crt_alg
;
1756 struct hash_alg_common
*halg
=
1757 container_of(base
, struct hash_alg_common
, base
);
1758 struct ahash_alg
*alg
=
1759 container_of(halg
, struct ahash_alg
, halg
);
1760 struct caam_hash_alg
*caam_hash
=
1761 container_of(alg
, struct caam_hash_alg
, ahash_alg
);
1762 struct caam_hash_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1763 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1764 static const u8 runninglen
[] = { HASH_MSG_LEN
+ MD5_DIGEST_SIZE
,
1765 HASH_MSG_LEN
+ SHA1_DIGEST_SIZE
,
1767 HASH_MSG_LEN
+ SHA256_DIGEST_SIZE
,
1769 HASH_MSG_LEN
+ SHA512_DIGEST_SIZE
};
1770 const size_t sh_desc_update_offset
= offsetof(struct caam_hash_ctx
,
1772 dma_addr_t dma_addr
;
1773 struct caam_drv_private
*priv
;
1776 * Get a Job ring from Job Ring driver to ensure in-order
1777 * crypto request processing per tfm
1779 ctx
->jrdev
= caam_jr_alloc();
1780 if (IS_ERR(ctx
->jrdev
)) {
1781 pr_err("Job Ring Device allocation for transform failed\n");
1782 return PTR_ERR(ctx
->jrdev
);
1785 priv
= dev_get_drvdata(ctx
->jrdev
->parent
);
1787 if (is_xcbc_aes(caam_hash
->alg_type
)) {
1788 ctx
->dir
= DMA_TO_DEVICE
;
1789 ctx
->key_dir
= DMA_BIDIRECTIONAL
;
1790 ctx
->adata
.algtype
= OP_TYPE_CLASS1_ALG
| caam_hash
->alg_type
;
1792 } else if (is_cmac_aes(caam_hash
->alg_type
)) {
1793 ctx
->dir
= DMA_TO_DEVICE
;
1794 ctx
->key_dir
= DMA_NONE
;
1795 ctx
->adata
.algtype
= OP_TYPE_CLASS1_ALG
| caam_hash
->alg_type
;
1798 if (priv
->era
>= 6) {
1799 ctx
->dir
= DMA_BIDIRECTIONAL
;
1800 ctx
->key_dir
= alg
->setkey
? DMA_TO_DEVICE
: DMA_NONE
;
1802 ctx
->dir
= DMA_TO_DEVICE
;
1803 ctx
->key_dir
= DMA_NONE
;
1805 ctx
->adata
.algtype
= OP_TYPE_CLASS2_ALG
| caam_hash
->alg_type
;
1806 ctx
->ctx_len
= runninglen
[(ctx
->adata
.algtype
&
1807 OP_ALG_ALGSEL_SUBMASK
) >>
1808 OP_ALG_ALGSEL_SHIFT
];
1811 if (ctx
->key_dir
!= DMA_NONE
) {
1812 ctx
->adata
.key_dma
= dma_map_single_attrs(ctx
->jrdev
, ctx
->key
,
1813 ARRAY_SIZE(ctx
->key
),
1815 DMA_ATTR_SKIP_CPU_SYNC
);
1816 if (dma_mapping_error(ctx
->jrdev
, ctx
->adata
.key_dma
)) {
1817 dev_err(ctx
->jrdev
, "unable to map key\n");
1818 caam_jr_free(ctx
->jrdev
);
1823 dma_addr
= dma_map_single_attrs(ctx
->jrdev
, ctx
->sh_desc_update
,
1824 offsetof(struct caam_hash_ctx
, key
) -
1825 sh_desc_update_offset
,
1826 ctx
->dir
, DMA_ATTR_SKIP_CPU_SYNC
);
1827 if (dma_mapping_error(ctx
->jrdev
, dma_addr
)) {
1828 dev_err(ctx
->jrdev
, "unable to map shared descriptors\n");
1830 if (ctx
->key_dir
!= DMA_NONE
)
1831 dma_unmap_single_attrs(ctx
->jrdev
, ctx
->adata
.key_dma
,
1832 ARRAY_SIZE(ctx
->key
),
1834 DMA_ATTR_SKIP_CPU_SYNC
);
1836 caam_jr_free(ctx
->jrdev
);
1840 ctx
->sh_desc_update_dma
= dma_addr
;
1841 ctx
->sh_desc_update_first_dma
= dma_addr
+
1842 offsetof(struct caam_hash_ctx
,
1843 sh_desc_update_first
) -
1844 sh_desc_update_offset
;
1845 ctx
->sh_desc_fin_dma
= dma_addr
+ offsetof(struct caam_hash_ctx
,
1847 sh_desc_update_offset
;
1848 ctx
->sh_desc_digest_dma
= dma_addr
+ offsetof(struct caam_hash_ctx
,
1850 sh_desc_update_offset
;
1852 ctx
->enginectx
.op
.do_one_request
= ahash_do_one_req
;
1854 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
1855 sizeof(struct caam_hash_state
));
1858 * For keyed hash algorithms shared descriptors
1859 * will be created later in setkey() callback
1861 return alg
->setkey
? 0 : ahash_set_sh_desc(ahash
);
1864 static void caam_hash_cra_exit(struct crypto_tfm
*tfm
)
1866 struct caam_hash_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1868 dma_unmap_single_attrs(ctx
->jrdev
, ctx
->sh_desc_update_dma
,
1869 offsetof(struct caam_hash_ctx
, key
) -
1870 offsetof(struct caam_hash_ctx
, sh_desc_update
),
1871 ctx
->dir
, DMA_ATTR_SKIP_CPU_SYNC
);
1872 if (ctx
->key_dir
!= DMA_NONE
)
1873 dma_unmap_single_attrs(ctx
->jrdev
, ctx
->adata
.key_dma
,
1874 ARRAY_SIZE(ctx
->key
), ctx
->key_dir
,
1875 DMA_ATTR_SKIP_CPU_SYNC
);
1876 caam_jr_free(ctx
->jrdev
);
1879 void caam_algapi_hash_exit(void)
1881 struct caam_hash_alg
*t_alg
, *n
;
1883 if (!hash_list
.next
)
1886 list_for_each_entry_safe(t_alg
, n
, &hash_list
, entry
) {
1887 crypto_unregister_ahash(&t_alg
->ahash_alg
);
1888 list_del(&t_alg
->entry
);
1893 static struct caam_hash_alg
*
1894 caam_hash_alloc(struct caam_hash_template
*template,
1897 struct caam_hash_alg
*t_alg
;
1898 struct ahash_alg
*halg
;
1899 struct crypto_alg
*alg
;
1901 t_alg
= kzalloc(sizeof(*t_alg
), GFP_KERNEL
);
1903 pr_err("failed to allocate t_alg\n");
1904 return ERR_PTR(-ENOMEM
);
1907 t_alg
->ahash_alg
= template->template_ahash
;
1908 halg
= &t_alg
->ahash_alg
;
1909 alg
= &halg
->halg
.base
;
1912 snprintf(alg
->cra_name
, CRYPTO_MAX_ALG_NAME
, "%s",
1913 template->hmac_name
);
1914 snprintf(alg
->cra_driver_name
, CRYPTO_MAX_ALG_NAME
, "%s",
1915 template->hmac_driver_name
);
1917 snprintf(alg
->cra_name
, CRYPTO_MAX_ALG_NAME
, "%s",
1919 snprintf(alg
->cra_driver_name
, CRYPTO_MAX_ALG_NAME
, "%s",
1920 template->driver_name
);
1921 t_alg
->ahash_alg
.setkey
= NULL
;
1923 alg
->cra_module
= THIS_MODULE
;
1924 alg
->cra_init
= caam_hash_cra_init
;
1925 alg
->cra_exit
= caam_hash_cra_exit
;
1926 alg
->cra_ctxsize
= sizeof(struct caam_hash_ctx
);
1927 alg
->cra_priority
= CAAM_CRA_PRIORITY
;
1928 alg
->cra_blocksize
= template->blocksize
;
1929 alg
->cra_alignmask
= 0;
1930 alg
->cra_flags
= CRYPTO_ALG_ASYNC
| CRYPTO_ALG_ALLOCATES_MEMORY
;
1932 t_alg
->alg_type
= template->alg_type
;
1937 int caam_algapi_hash_init(struct device
*ctrldev
)
1940 struct caam_drv_private
*priv
= dev_get_drvdata(ctrldev
);
1941 unsigned int md_limit
= SHA512_DIGEST_SIZE
;
1942 u32 md_inst
, md_vid
;
1945 * Register crypto algorithms the device supports. First, identify
1946 * presence and attributes of MD block.
1948 if (priv
->era
< 10) {
1949 md_vid
= (rd_reg32(&priv
->ctrl
->perfmon
.cha_id_ls
) &
1950 CHA_ID_LS_MD_MASK
) >> CHA_ID_LS_MD_SHIFT
;
1951 md_inst
= (rd_reg32(&priv
->ctrl
->perfmon
.cha_num_ls
) &
1952 CHA_ID_LS_MD_MASK
) >> CHA_ID_LS_MD_SHIFT
;
1954 u32 mdha
= rd_reg32(&priv
->ctrl
->vreg
.mdha
);
1956 md_vid
= (mdha
& CHA_VER_VID_MASK
) >> CHA_VER_VID_SHIFT
;
1957 md_inst
= mdha
& CHA_VER_NUM_MASK
;
1961 * Skip registration of any hashing algorithms if MD block
1967 /* Limit digest size based on LP256 */
1968 if (md_vid
== CHA_VER_VID_MD_LP256
)
1969 md_limit
= SHA256_DIGEST_SIZE
;
1971 INIT_LIST_HEAD(&hash_list
);
1973 /* register crypto algorithms the device supports */
1974 for (i
= 0; i
< ARRAY_SIZE(driver_hash
); i
++) {
1975 struct caam_hash_alg
*t_alg
;
1976 struct caam_hash_template
*alg
= driver_hash
+ i
;
1978 /* If MD size is not supported by device, skip registration */
1979 if (is_mdha(alg
->alg_type
) &&
1980 alg
->template_ahash
.halg
.digestsize
> md_limit
)
1983 /* register hmac version */
1984 t_alg
= caam_hash_alloc(alg
, true);
1985 if (IS_ERR(t_alg
)) {
1986 err
= PTR_ERR(t_alg
);
1987 pr_warn("%s alg allocation failed\n",
1988 alg
->hmac_driver_name
);
1992 err
= crypto_register_ahash(&t_alg
->ahash_alg
);
1994 pr_warn("%s alg registration failed: %d\n",
1995 t_alg
->ahash_alg
.halg
.base
.cra_driver_name
,
1999 list_add_tail(&t_alg
->entry
, &hash_list
);
2001 if ((alg
->alg_type
& OP_ALG_ALGSEL_MASK
) == OP_ALG_ALGSEL_AES
)
2004 /* register unkeyed version */
2005 t_alg
= caam_hash_alloc(alg
, false);
2006 if (IS_ERR(t_alg
)) {
2007 err
= PTR_ERR(t_alg
);
2008 pr_warn("%s alg allocation failed\n", alg
->driver_name
);
2012 err
= crypto_register_ahash(&t_alg
->ahash_alg
);
2014 pr_warn("%s alg registration failed: %d\n",
2015 t_alg
->ahash_alg
.halg
.base
.cra_driver_name
,
2019 list_add_tail(&t_alg
->entry
, &hash_list
);