/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"
struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};
static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}
static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}
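/*
 * Note on the two pools used above: the cache and padding buffers are
 * carved out of the driver-wide dma_pool instances
 * (cesa_dev->dma->cache_pool and cesa_dev->dma->padding_pool), so they
 * are both CPU-addressable and directly usable as TDMA transfer sources.
 * The cache buffer holds the left-over partial block of a previous
 * update, the padding buffer holds the manually generated final padding.
 */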
static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int index, padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}
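/*
 * Padding layout produced by mv_cesa_ahash_pad_req(), following the
 * MD5/SHA1/SHA256 finalisation rules: a single 0x80 byte, zero bytes up
 * to an offset of 56 mod 64, then the total message length in bits as a
 * 64-bit integer. MD5 stores that length little endian while the SHA
 * variants store it big endian, which is what the algo_le flag selects.
 */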
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
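/*
 * In short, the standard (PIO) step above copies the operation template
 * and any cached left-over bytes into the engine SRAM, copies in up to
 * CESA_SA_SRAM_PAYLOAD_SIZE bytes of new data from the scatterlist,
 * picks the fragment mode (FIRST/MID/LAST/NOT_FRAG) from the request
 * state, and kicks the accelerator by writing
 * CESA_SA_CMD_EN_CESA_SA_ACCL0. Bytes that do not fill a complete hash
 * block, or that would not fit together with the trailer, are kept in
 * creq->cache for the next step.
 */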
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}
static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}
static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}
static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * Result is already in the correct endianess when the SA is
		 * used.
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = cpu_to_le32(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * Hardware's MD5 digest is in little endian format, but
			 * SHA in big endian format.
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}
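/*
 * Two completion paths exist above: when the TDMA chain ends with a
 * CESA_TDMA_RESULT descriptor, the digest has already been copied out of
 * the SA context by DMA; otherwise the partial or final digest is read
 * back from the IVDIG registers. The final byte-order fixup depends on
 * the algorithm: MD5 produces little-endian words, SHA1/SHA256 produce
 * big-endian words.
 */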
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}
static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}
static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};
static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));

	return 0;
}
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}
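/*
 * mv_cesa_ahash_cache_req() implements the small-update optimisation:
 * as long as the data accumulated so far plus the new bytes still fit
 * below one hash block (CESA_MAX_HASH_BLOCK_SIZE) and this is not the
 * final request, the bytes are simply appended to creq->cache and no
 * hardware operation is queued at all.
 */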
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}
static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);

		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						    CESA_SA_DATA_SRAM_OFFSET +
						    frag_len,
						    ahashdreq->padding_dma,
						    len, CESA_TDMA_DST_IN_SRAM,
						    flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}
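/*
 * Roughly, the TDMA chain built above looks like this:
 *   [copy cached bytes to SRAM] ->
 *   [data transfer + op + dummy launch] per full SRAM payload ->
 *   [final op: either a last-request op with DMA result copy-back, or a
 *    plain mid fragment] ->
 *   [dummy end descriptor, unless the result is copied by DMA]
 * The first descriptor may carry CESA_TDMA_SET_STATE so the step logic
 * reloads the IVDIG registers, and the last one carries
 * CESA_TDMA_END_OF_REQ (plus CESA_TDMA_BREAK_CHAIN when partial results
 * have to be read back through the registers).
 */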
static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}
static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}
static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}
static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};
static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
= {
977 .init
= mv_cesa_sha1_init
,
978 .update
= mv_cesa_ahash_update
,
979 .final
= mv_cesa_ahash_final
,
980 .finup
= mv_cesa_ahash_finup
,
981 .digest
= mv_cesa_sha1_digest
,
982 .export
= mv_cesa_sha1_export
,
983 .import
= mv_cesa_sha1_import
,
985 .digestsize
= SHA1_DIGEST_SIZE
,
986 .statesize
= sizeof(struct sha1_state
),
989 .cra_driver_name
= "mv-sha1",
991 .cra_flags
= CRYPTO_ALG_ASYNC
|
992 CRYPTO_ALG_KERN_DRIVER_ONLY
,
993 .cra_blocksize
= SHA1_BLOCK_SIZE
,
994 .cra_ctxsize
= sizeof(struct mv_cesa_hash_ctx
),
995 .cra_init
= mv_cesa_ahash_cra_init
,
996 .cra_module
= THIS_MODULE
,
1001 static int mv_cesa_sha256_init(struct ahash_request
*req
)
1003 struct mv_cesa_ahash_req
*creq
= ahash_request_ctx(req
);
1004 struct mv_cesa_op_ctx tmpl
= { };
1006 mv_cesa_set_op_cfg(&tmpl
, CESA_SA_DESC_CFG_MACM_SHA256
);
1008 mv_cesa_ahash_init(req
, &tmpl
, false);
1010 creq
->state
[0] = SHA256_H0
;
1011 creq
->state
[1] = SHA256_H1
;
1012 creq
->state
[2] = SHA256_H2
;
1013 creq
->state
[3] = SHA256_H3
;
1014 creq
->state
[4] = SHA256_H4
;
1015 creq
->state
[5] = SHA256_H5
;
1016 creq
->state
[6] = SHA256_H6
;
1017 creq
->state
[7] = SHA256_H7
;
1022 static int mv_cesa_sha256_digest(struct ahash_request
*req
)
1026 ret
= mv_cesa_sha256_init(req
);
1030 return mv_cesa_ahash_finup(req
);
1033 static int mv_cesa_sha256_export(struct ahash_request
*req
, void *out
)
1035 struct sha256_state
*out_state
= out
;
1037 return mv_cesa_ahash_export(req
, out_state
->state
, &out_state
->count
,
1041 static int mv_cesa_sha256_import(struct ahash_request
*req
, const void *in
)
1043 const struct sha256_state
*in_state
= in
;
1045 return mv_cesa_ahash_import(req
, in_state
->state
, in_state
->count
,
1049 struct ahash_alg mv_sha256_alg
= {
1050 .init
= mv_cesa_sha256_init
,
1051 .update
= mv_cesa_ahash_update
,
1052 .final
= mv_cesa_ahash_final
,
1053 .finup
= mv_cesa_ahash_finup
,
1054 .digest
= mv_cesa_sha256_digest
,
1055 .export
= mv_cesa_sha256_export
,
1056 .import
= mv_cesa_sha256_import
,
1058 .digestsize
= SHA256_DIGEST_SIZE
,
1059 .statesize
= sizeof(struct sha256_state
),
1061 .cra_name
= "sha256",
1062 .cra_driver_name
= "mv-sha256",
1063 .cra_priority
= 300,
1064 .cra_flags
= CRYPTO_ALG_ASYNC
|
1065 CRYPTO_ALG_KERN_DRIVER_ONLY
,
1066 .cra_blocksize
= SHA256_BLOCK_SIZE
,
1067 .cra_ctxsize
= sizeof(struct mv_cesa_hash_ctx
),
1068 .cra_init
= mv_cesa_ahash_cra_init
,
1069 .cra_module
= THIS_MODULE
,
1074 struct mv_cesa_ahash_result
{
1075 struct completion completion
;
1079 static void mv_cesa_hmac_ahash_complete(struct crypto_async_request
*req
,
1082 struct mv_cesa_ahash_result
*result
= req
->data
;
1084 if (error
== -EINPROGRESS
)
1087 result
->error
= error
;
1088 complete(&result
->completion
);
1091 static int mv_cesa_ahmac_iv_state_init(struct ahash_request
*req
, u8
*pad
,
1092 void *state
, unsigned int blocksize
)
1094 struct mv_cesa_ahash_result result
;
1095 struct scatterlist sg
;
1098 ahash_request_set_callback(req
, CRYPTO_TFM_REQ_MAY_BACKLOG
,
1099 mv_cesa_hmac_ahash_complete
, &result
);
1100 sg_init_one(&sg
, pad
, blocksize
);
1101 ahash_request_set_crypt(req
, &sg
, pad
, blocksize
);
1102 init_completion(&result
.completion
);
1104 ret
= crypto_ahash_init(req
);
1108 ret
= crypto_ahash_update(req
);
1109 if (ret
&& ret
!= -EINPROGRESS
)
1112 wait_for_completion_interruptible(&result
.completion
);
1114 return result
.error
;
1116 ret
= crypto_ahash_export(req
, state
);
1123 static int mv_cesa_ahmac_pad_init(struct ahash_request
*req
,
1124 const u8
*key
, unsigned int keylen
,
1126 unsigned int blocksize
)
1128 struct mv_cesa_ahash_result result
;
1129 struct scatterlist sg
;
1133 if (keylen
<= blocksize
) {
1134 memcpy(ipad
, key
, keylen
);
1136 u8
*keydup
= kmemdup(key
, keylen
, GFP_KERNEL
);
1141 ahash_request_set_callback(req
, CRYPTO_TFM_REQ_MAY_BACKLOG
,
1142 mv_cesa_hmac_ahash_complete
,
1144 sg_init_one(&sg
, keydup
, keylen
);
1145 ahash_request_set_crypt(req
, &sg
, ipad
, keylen
);
1146 init_completion(&result
.completion
);
1148 ret
= crypto_ahash_digest(req
);
1149 if (ret
== -EINPROGRESS
) {
1150 wait_for_completion_interruptible(&result
.completion
);
1154 /* Set the memory region to 0 to avoid any leak. */
1155 memset(keydup
, 0, keylen
);
1161 keylen
= crypto_ahash_digestsize(crypto_ahash_reqtfm(req
));
1164 memset(ipad
+ keylen
, 0, blocksize
- keylen
);
1165 memcpy(opad
, ipad
, blocksize
);
1167 for (i
= 0; i
< blocksize
; i
++) {
1168 ipad
[i
] ^= HMAC_IPAD_VALUE
;
1169 opad
[i
] ^= HMAC_OPAD_VALUE
;
1175 static int mv_cesa_ahmac_setkey(const char *hash_alg_name
,
1176 const u8
*key
, unsigned int keylen
,
1177 void *istate
, void *ostate
)
1179 struct ahash_request
*req
;
1180 struct crypto_ahash
*tfm
;
1181 unsigned int blocksize
;
1186 tfm
= crypto_alloc_ahash(hash_alg_name
, CRYPTO_ALG_TYPE_AHASH
,
1187 CRYPTO_ALG_TYPE_AHASH_MASK
);
1189 return PTR_ERR(tfm
);
1191 req
= ahash_request_alloc(tfm
, GFP_KERNEL
);
1197 crypto_ahash_clear_flags(tfm
, ~0);
1199 blocksize
= crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
1201 ipad
= kcalloc(2, blocksize
, GFP_KERNEL
);
1207 opad
= ipad
+ blocksize
;
1209 ret
= mv_cesa_ahmac_pad_init(req
, key
, keylen
, ipad
, opad
, blocksize
);
1213 ret
= mv_cesa_ahmac_iv_state_init(req
, ipad
, istate
, blocksize
);
1217 ret
= mv_cesa_ahmac_iv_state_init(req
, opad
, ostate
, blocksize
);
1222 ahash_request_free(req
);
1224 crypto_free_ahash(tfm
);
1229 static int mv_cesa_ahmac_cra_init(struct crypto_tfm
*tfm
)
1231 struct mv_cesa_hmac_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1233 ctx
->base
.ops
= &mv_cesa_ahash_req_ops
;
1235 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
1236 sizeof(struct mv_cesa_ahash_req
));
1240 static int mv_cesa_ahmac_md5_init(struct ahash_request
*req
)
1242 struct mv_cesa_hmac_ctx
*ctx
= crypto_tfm_ctx(req
->base
.tfm
);
1243 struct mv_cesa_op_ctx tmpl
= { };
1245 mv_cesa_set_op_cfg(&tmpl
, CESA_SA_DESC_CFG_MACM_HMAC_MD5
);
1246 memcpy(tmpl
.ctx
.hash
.iv
, ctx
->iv
, sizeof(ctx
->iv
));
1248 mv_cesa_ahash_init(req
, &tmpl
, true);
1253 static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash
*tfm
, const u8
*key
,
1254 unsigned int keylen
)
1256 struct mv_cesa_hmac_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
1257 struct md5_state istate
, ostate
;
1260 ret
= mv_cesa_ahmac_setkey("mv-md5", key
, keylen
, &istate
, &ostate
);
1264 for (i
= 0; i
< ARRAY_SIZE(istate
.hash
); i
++)
1265 ctx
->iv
[i
] = be32_to_cpu(istate
.hash
[i
]);
1267 for (i
= 0; i
< ARRAY_SIZE(ostate
.hash
); i
++)
1268 ctx
->iv
[i
+ 8] = be32_to_cpu(ostate
.hash
[i
]);
1273 static int mv_cesa_ahmac_md5_digest(struct ahash_request
*req
)
1277 ret
= mv_cesa_ahmac_md5_init(req
);
1281 return mv_cesa_ahash_finup(req
);
1284 struct ahash_alg mv_ahmac_md5_alg
= {
1285 .init
= mv_cesa_ahmac_md5_init
,
1286 .update
= mv_cesa_ahash_update
,
1287 .final
= mv_cesa_ahash_final
,
1288 .finup
= mv_cesa_ahash_finup
,
1289 .digest
= mv_cesa_ahmac_md5_digest
,
1290 .setkey
= mv_cesa_ahmac_md5_setkey
,
1291 .export
= mv_cesa_md5_export
,
1292 .import
= mv_cesa_md5_import
,
1294 .digestsize
= MD5_DIGEST_SIZE
,
1295 .statesize
= sizeof(struct md5_state
),
1297 .cra_name
= "hmac(md5)",
1298 .cra_driver_name
= "mv-hmac-md5",
1299 .cra_priority
= 300,
1300 .cra_flags
= CRYPTO_ALG_ASYNC
|
1301 CRYPTO_ALG_KERN_DRIVER_ONLY
,
1302 .cra_blocksize
= MD5_HMAC_BLOCK_SIZE
,
1303 .cra_ctxsize
= sizeof(struct mv_cesa_hmac_ctx
),
1304 .cra_init
= mv_cesa_ahmac_cra_init
,
1305 .cra_module
= THIS_MODULE
,
1310 static int mv_cesa_ahmac_sha1_init(struct ahash_request
*req
)
1312 struct mv_cesa_hmac_ctx
*ctx
= crypto_tfm_ctx(req
->base
.tfm
);
1313 struct mv_cesa_op_ctx tmpl
= { };
1315 mv_cesa_set_op_cfg(&tmpl
, CESA_SA_DESC_CFG_MACM_HMAC_SHA1
);
1316 memcpy(tmpl
.ctx
.hash
.iv
, ctx
->iv
, sizeof(ctx
->iv
));
1318 mv_cesa_ahash_init(req
, &tmpl
, false);
1323 static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash
*tfm
, const u8
*key
,
1324 unsigned int keylen
)
1326 struct mv_cesa_hmac_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
1327 struct sha1_state istate
, ostate
;
1330 ret
= mv_cesa_ahmac_setkey("mv-sha1", key
, keylen
, &istate
, &ostate
);
1334 for (i
= 0; i
< ARRAY_SIZE(istate
.state
); i
++)
1335 ctx
->iv
[i
] = be32_to_cpu(istate
.state
[i
]);
1337 for (i
= 0; i
< ARRAY_SIZE(ostate
.state
); i
++)
1338 ctx
->iv
[i
+ 8] = be32_to_cpu(ostate
.state
[i
]);
1343 static int mv_cesa_ahmac_sha1_digest(struct ahash_request
*req
)
1347 ret
= mv_cesa_ahmac_sha1_init(req
);
1351 return mv_cesa_ahash_finup(req
);
1354 struct ahash_alg mv_ahmac_sha1_alg
= {
1355 .init
= mv_cesa_ahmac_sha1_init
,
1356 .update
= mv_cesa_ahash_update
,
1357 .final
= mv_cesa_ahash_final
,
1358 .finup
= mv_cesa_ahash_finup
,
1359 .digest
= mv_cesa_ahmac_sha1_digest
,
1360 .setkey
= mv_cesa_ahmac_sha1_setkey
,
1361 .export
= mv_cesa_sha1_export
,
1362 .import
= mv_cesa_sha1_import
,
1364 .digestsize
= SHA1_DIGEST_SIZE
,
1365 .statesize
= sizeof(struct sha1_state
),
1367 .cra_name
= "hmac(sha1)",
1368 .cra_driver_name
= "mv-hmac-sha1",
1369 .cra_priority
= 300,
1370 .cra_flags
= CRYPTO_ALG_ASYNC
|
1371 CRYPTO_ALG_KERN_DRIVER_ONLY
,
1372 .cra_blocksize
= SHA1_BLOCK_SIZE
,
1373 .cra_ctxsize
= sizeof(struct mv_cesa_hmac_ctx
),
1374 .cra_init
= mv_cesa_ahmac_cra_init
,
1375 .cra_module
= THIS_MODULE
,
1380 static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash
*tfm
, const u8
*key
,
1381 unsigned int keylen
)
1383 struct mv_cesa_hmac_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
1384 struct sha256_state istate
, ostate
;
1387 ret
= mv_cesa_ahmac_setkey("mv-sha256", key
, keylen
, &istate
, &ostate
);
1391 for (i
= 0; i
< ARRAY_SIZE(istate
.state
); i
++)
1392 ctx
->iv
[i
] = be32_to_cpu(istate
.state
[i
]);
1394 for (i
= 0; i
< ARRAY_SIZE(ostate
.state
); i
++)
1395 ctx
->iv
[i
+ 8] = be32_to_cpu(ostate
.state
[i
]);
1400 static int mv_cesa_ahmac_sha256_init(struct ahash_request
*req
)
1402 struct mv_cesa_hmac_ctx
*ctx
= crypto_tfm_ctx(req
->base
.tfm
);
1403 struct mv_cesa_op_ctx tmpl
= { };
1405 mv_cesa_set_op_cfg(&tmpl
, CESA_SA_DESC_CFG_MACM_HMAC_SHA256
);
1406 memcpy(tmpl
.ctx
.hash
.iv
, ctx
->iv
, sizeof(ctx
->iv
));
1408 mv_cesa_ahash_init(req
, &tmpl
, false);
1413 static int mv_cesa_ahmac_sha256_digest(struct ahash_request
*req
)
1417 ret
= mv_cesa_ahmac_sha256_init(req
);
1421 return mv_cesa_ahash_finup(req
);
1424 struct ahash_alg mv_ahmac_sha256_alg
= {
1425 .init
= mv_cesa_ahmac_sha256_init
,
1426 .update
= mv_cesa_ahash_update
,
1427 .final
= mv_cesa_ahash_final
,
1428 .finup
= mv_cesa_ahash_finup
,
1429 .digest
= mv_cesa_ahmac_sha256_digest
,
1430 .setkey
= mv_cesa_ahmac_sha256_setkey
,
1431 .export
= mv_cesa_sha256_export
,
1432 .import
= mv_cesa_sha256_import
,
1434 .digestsize
= SHA256_DIGEST_SIZE
,
1435 .statesize
= sizeof(struct sha256_state
),
1437 .cra_name
= "hmac(sha256)",
1438 .cra_driver_name
= "mv-hmac-sha256",
1439 .cra_priority
= 300,
1440 .cra_flags
= CRYPTO_ALG_ASYNC
|
1441 CRYPTO_ALG_KERN_DRIVER_ONLY
,
1442 .cra_blocksize
= SHA256_BLOCK_SIZE
,
1443 .cra_ctxsize
= sizeof(struct mv_cesa_hmac_ctx
),
1444 .cra_init
= mv_cesa_ahmac_cra_init
,
1445 .cra_module
= THIS_MODULE
,