// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * | JobDesc #3  |------|    |
 * | JobDesc #4  |------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | ShareDesc Pointer |
 * ---------------------
 */
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caamhash_desc.h"
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
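/* List of hash algorithms registered with the crypto API, torn down in caam_algapi_hash_exit() */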
static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};
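/* ahash per-request state */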
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
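/* Check whether the algorithm type selects AES with the CMAC AAI */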
static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}
static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}
/* Digest the key if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   int digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		unsigned int aligned_len =
			ALIGN(keylen, dma_get_cache_alignment());

		if (aligned_len < keylen)
			return -EOVERFLOW;

		hashed_key = kmemdup(key, keylen, GFP_KERNEL);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}
static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}
static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};
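/* Undo the DMA mappings held by a request: source S/G list, S/G table and buffered data */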
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req, dst_len);
}
static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}
static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	print_hex_dump_debug("result@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}
/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;

	sg_num = pad_sg_nents(sg_num);
	edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
	if (!edesc)
		return NULL;

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}
static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
				  DMA_TO_DEVICE);
	if (!mapped_nents) {
		dev_err(jrdev, "unable to DMA map source\n");
		return -ENOMEM;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
				  DMA_TO_DEVICE);
	if (!mapped_nents) {
		dev_err(jrdev, "unable to map source for DMA\n");
		return -ENOMEM;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);
}
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
				  DMA_TO_DEVICE);
	if (!mapped_nents) {
		dev_err(jrdev, "unable to DMA map source\n");
		return -ENOMEM;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
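/* Reset the request state so the first update goes through ahash_update_first */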
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
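/* Template describing one hash algorithm (and its keyed/hmac variant) offered by this driver */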
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};
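/* Per-algorithm registration record, linked into hash_list when the algorithm is registered */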
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	bool is_hmac;
	struct ahash_engine_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg.base);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA224_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA384_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
						      sh_desc_update);
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = caam_hash->is_hmac ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key) -
					sh_desc_update_offset,
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first) -
					sh_desc_update_offset;
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin) -
					sh_desc_update_offset;
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest) -
					sh_desc_update_offset;

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key) -
			       offsetof(struct caam_hash_ctx, sh_desc_update),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}
void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_engine_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg.base = template->template_ahash;
	halg = &t_alg->ahash_alg.base;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
		t_alg->is_hmac = true;
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		halg->setkey = NULL;
		t_alg->is_hmac = false;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;
	t_alg->ahash_alg.op.do_one_request = ahash_do_one_req;

	return t_alg;
}
int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;

		md_vid = (rd_reg32(&perfmon->cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&perfmon->cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_engine_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.base.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_engine_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.base.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}