// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |     |-------->| (operation) |
 *       .              |     |         | (load ctx2) |
 *       .              |     |         ---------------
 * ---------------      |     |
 * | JobDesc #3  |------|     |
 * ---------------            |
 *       .                    |
 * ---------------            |
 * | JobDesc #4  |------------
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | ShareDesc Pointer |
 * ---------------------
 */

#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct crypto_engine_ctx enginectx;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};
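
/*
 * per-request hashing state: running CAAM context, buffered input and the
 * update/final/finup handlers selected for the current phase of the request
 */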
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};
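
/* partial hash state snapshot passed between ahash_export() and ahash_import() */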
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
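
/* true when algtype selects the AES CHA in CMAC mode */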
static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
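
/*
 * Build the update, update_first, final and digest shared descriptors for an
 * MDHA-based (MD5/SHA*) hash and sync them to the device.
 */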
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
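
/* Build the shared descriptors for AES-XCBC-MAC; the key is referenced from ctx->key */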
static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}
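
/* Build the shared descriptors for AES-CMAC; the key is always immediate data */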
static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   int digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
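
/*
 * (H)MAC key programming: keys longer than the block size are first hashed,
 * then the split key is either derived by DKP in the shared descriptor
 * (Era >= 6) or generated here via gen_split_key().
 */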
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}
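
/* xcbc(aes) setkey: copy the 128-bit key into ctx->key and sync its DMA mapping */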
static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}

/**
 * struct ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};
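
/* Unmap the source scatterlist, the S/G link table and the buffered-data mapping */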
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req, dst_len);
}
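
/* Completion handler for jobs whose result is copied out of state->caam_ctx */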
static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		req->base.complete(&req->base, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		req->base.complete(&req->base, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
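
/* crypto-engine callback: submit a backlogged request's job descriptor to the job ring */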
static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}
static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only the backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
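
/* submit final job descriptor: hash the running context plus any buffered bytes */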
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
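
/* submit finup job descriptor: running context, buffered bytes and remaining req->src */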
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  digestsize);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
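
/* one-shot digest of req->src using the INITFINAL shared descriptor */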
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
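
/* reset the request state and select the "first job descriptor" handlers */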
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
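
/* export/import a partial hash state so a request can be suspended and resumed */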
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
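
/* build-time template describing one hash algorithm (unkeyed and hmac variants) */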
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};
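
/* run-time registration record wrapping the ahash_alg registered with the crypto API */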
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
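
/* tfm init: allocate a job ring, pick DMA directions and map key/shared-descriptor buffers */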
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA224_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA384_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
						      sh_desc_update);
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key) -
					sh_desc_update_offset,
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first) -
					sh_desc_update_offset;
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin) -
					  sh_desc_update_offset;
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest) -
					     sh_desc_update_offset;

	ctx->enginectx.op.do_one_request = ahash_do_one_req;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key) -
			       offsetof(struct caam_hash_ctx, sh_desc_update),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
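
/* allocate a caam_hash_alg from a template, as either the keyed (hmac) or unkeyed variant */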
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template, bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}