// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |-------->| (operation) |
 *       .              |         |     | (load ctx2) |
 *       .              |         |     ---------------
 * | JobDesc #3  |------|         |
 * | JobDesc #4  |------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | ShareDesc Pointer |
 * ---------------------
 */
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caamhash_desc.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
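
/*
 * Note: the per-request hardware context kept in caam_hash_state::caam_ctx
 * therefore holds the running digest followed by the 8-byte message-length
 * field the MDHA maintains across split operations, hence MAX_CTX_LEN.
 */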
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	struct alginfo adata;
};

struct caam_hash_state {
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
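
/*
 * The state keeps two bounce buffers (buf_0/buf_1) and ping-pongs between
 * them: the "current" buffer holds bytes carried over from the previous
 * update, while the "alternate" buffer collects the tail of the present
 * request; switch_buf() toggles which one is which for the next round.
 */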
static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
	int buflen = *current_buflen(state);

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;

	/* key is loaded from memory for UPDATE and FINALIZE states */
	ctx->adata.key_dma = ctx->key_dma;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len, ctx->key_dma);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
static int acmac_set_sh_desc(struct crypto_ahash *ahash)
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);
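
/*
 * Keys longer than the algorithm block size are first digested with the
 * unkeyed hash below (the usual HMAC convention), so at most digestsize
 * bytes of key material ever reach the split-key machinery.
 */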
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
	struct device *jrdev = ctx->jrdev;
	struct split_key_result result;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
		dev_err(jrdev, "unable to allocate key input memory\n");

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	wait_for_completion(&result.completion);

	print_hex_dump(KERN_ERR,
		       "digested key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1);

	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	u8 *hashed_key = NULL;

	printk(KERN_ERR "keylen %d\n", keylen);

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)

		memcpy(ctx->key, key, keylen);
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);

	return ahash_set_sh_desc(ahash);

	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
/**
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
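
/*
 * sec4_sg[] is a trailing flexible array: ahash_edesc_alloc() below sizes the
 * allocation as sizeof(*edesc) + sg_num * sizeof(struct sec4_sg_entry), so the
 * job descriptor and its S/G (link) table live in one contiguous buffer.
 */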
static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len, u32 flag)
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);

	ahash_unmap(dev, edesc, req, dst_len);
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);

	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,

	req->base.complete(&req->base, err);

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);

	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
	print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->result,

	req->base.complete(&req->base, err);

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	memcpy(req->result, state->caam_ctx, digestsize);

	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,

	req->base.complete(&req->base, err);

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);

	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
	print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->result,

	req->base.complete(&req->base, err);
/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;

		src_dma = sg_dma_address(req->src);

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;

		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
			dev_err(jrdev, "Invalid number of src SG.\n");

		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
			dev_err(jrdev, "unable to DMA map source\n");

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update_dma, flags);
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);

			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -

			scatterwalk_map_and_copy(next_buf, req->src,

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
		*buflen = *next_buflen;
		*next_buflen = last_buflen;

	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
static int ahash_final_ctx(struct ahash_request *req)
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
static int ahash_finup_ctx(struct ahash_request *req)
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");

		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
		return -EINPROGRESS;

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
static int ahash_digest(struct ahash_request *req)
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");

		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
		ahash_unmap(jrdev, edesc, req, digestsize);

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
		ahash_unmap(jrdev, edesc, req, digestsize);

	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen,
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);

	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);

	ahash_unmap(jrdev, edesc, req, digestsize);
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;

		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");

		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

			scatterwalk_map_and_copy(next_buf, req->src,

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
		*buflen = *next_buflen;

	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");

		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
		dev_err(jrdev, "unable to map S/G table\n");

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);

	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);

	ahash_unmap(jrdev, edesc, req, digestsize);
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;

		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");

		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,

			scatterwalk_map_and_copy(next_buf, req->src, to_hash,

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,

	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
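
/*
 * The update/finup/final hooks in caam_hash_state start out pointing at the
 * "first"/"no_ctx" handlers installed by ahash_init() below; once a running
 * hardware context exists, ahash_update_first()/ahash_update_no_ctx() switch
 * them over to the context-based handlers for the rest of the request stream.
 */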
static int ahash_init(struct ahash_request *req)
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma_len = 0;
	state->current_buf = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

static int ahash_update(struct ahash_request *req)
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);

static int ahash_finup(struct ahash_request *req)
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);

static int ahash_final(struct ahash_request *req)
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
static int ahash_export(struct ahash_request *req, void *out)
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;

	if (state->current_buf) {
		len = state->buflen_1;
		len = state->buflen_0;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

static int ahash_import(struct ahash_request *req, const void *in)
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
		.alg_type = OP_ALG_ALGSEL_SHA1,

		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
		.alg_type = OP_ALG_ALGSEL_SHA224,

		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
		.alg_type = OP_ALG_ALGSEL_SHA256,

		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
		.alg_type = OP_ALG_ALGSEL_SHA384,

		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
		.alg_type = OP_ALG_ALGSEL_SHA512,

		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
		.alg_type = OP_ALG_ALGSEL_MD5,

		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,

		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
struct caam_hash_alg {
	struct list_head entry;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;

		ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
						    ARRAY_SIZE(ctx->key),
						    DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					   OP_ALG_ALGSEL_SHIFT];

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (is_xcbc_aes(caam_hash->alg_type))
			dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (is_xcbc_aes(ctx->adata.algtype))
		dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
				       ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
static void __exit caam_algapi_hash_exit(void)
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;

	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;
static int __init caam_algapi_hash_init(void)
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");

	pdev = of_find_device_by_node(dev_node);
		of_node_put(dev_node);

	priv = dev_get_drvdata(&pdev->dev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);

		err = crypto_register_ahash(&t_alg->ahash_alg);
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,

		list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);

		err = crypto_register_ahash(&t_alg->ahash_alg);
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,

		list_add_tail(&t_alg->entry, &hash_list);

	put_device(&pdev->dev);
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");