// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"
struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}
static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (req->cache)
		dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
			      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}
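/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * padding built above is the usual MD5/SHA scheme, a 0x80 byte, zero fill
 * up to 56 mod 64, then the message length in bits. For a 3-byte message,
 * that is 0x80, 52 zero bytes and an 8-byte length of 24, one full block.
 */
static inline unsigned int mv_cesa_example_pad_total(unsigned long long msglen)
{
	unsigned int index = msglen & CESA_HASH_BLOCK_SIZE_MSK;
	unsigned int padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	/* 0x80 plus the zero fill is 'padlen' bytes, then the 64-bit length. */
	return padlen + 8;
}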
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i],
				       engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				i = mv_cesa_ahash_pad_req(creq, creq->cache);
				len += i;
				memcpy_toio(engine->sram + len +
					    CESA_SA_DATA_SRAM_OFFSET,
					    creq->cache, i);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}
static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
	    CESA_TDMA_RESULT) {
		__le32 *data;

		/*
		 * Result is already in the correct endianness when the SA is
		 * used
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = le32_to_cpu(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * Hardware's MD5 digest is in little endian format, but
			 * SHA in big endian format
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}
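/*
 * Illustrative sketch (hypothetical helper, not called above): the same
 * digest endianness rule in isolation. MD5 state words are emitted little
 * endian in the final result, SHA-1/SHA-256 words big endian.
 */
static inline void mv_cesa_example_copy_digest(void *result, const u32 *state,
					       unsigned int digsize,
					       bool algo_le)
{
	unsigned int i;

	for (i = 0; i < digsize / 4; i++) {
		if (algo_le)
			((__le32 *)result)[i] = cpu_to_le32(state[i]);
		else
			((__be32 *)result)[i] = cpu_to_be32(state[i]);
	}
}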
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};
static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));

	return 0;
}
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}
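/*
 * Illustrative sketch (hypothetical helper, mirrors the check above): an
 * update is absorbed into the cache only while the cached bytes plus the
 * new bytes stay below one maximum hash block and this is not the final
 * request. With 64-byte blocks, 40 cached + 20 new bytes are cached,
 * while 40 + 30 forces a pass through the engine.
 */
static inline bool mv_cesa_example_would_cache(unsigned int cache_ptr,
					       unsigned int nbytes,
					       bool last_req)
{
	return cache_ptr + nbytes < CESA_MAX_HASH_BLOCK_SIZE && !last_req;
}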
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}
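/*
 * Illustrative sketch (hypothetical helper): the fragment-mode demotion
 * performed above, in isolation. Once a first fragment has been queued,
 * the template becomes a "mid" fragment so that every following operation
 * block continues the running hash instead of restarting it.
 */
static inline u32 mv_cesa_example_next_frag_mode(u32 cur_frag_mode)
{
	if (cur_frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		return CESA_SA_DESC_CFG_MID_FRAG;

	return cur_frag_mode;
}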
static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);

		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						    CESA_SA_DATA_SRAM_OFFSET +
						    frag_len,
						    ahashdreq->padding_dma,
						    len, CESA_TDMA_DST_IN_SRAM,
						    flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}
static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}
static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}
static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
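/*
 * Illustrative sketch (hypothetical helper, mirrors the import path above):
 * do_div() reduces the 64-bit length in place and returns the remainder,
 * i.e. the number of bytes that belong in the block cache. For an import
 * with len = 150 and a 64-byte block, the cached byte count is 150 % 64 = 22.
 */
static inline unsigned int mv_cesa_example_cache_bytes(u64 len,
						       unsigned int blocksize)
{
	/* do_div() modifies 'len' and returns len % blocksize. */
	return do_div(len, blocksize);
}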
static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}
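/*
 * Illustrative sketch (hypothetical helper): the wait pattern used above
 * for an asynchronous ahash call. A return value of -EINPROGRESS means the
 * request was queued and the completion callback reports the final status.
 */
static inline int mv_cesa_example_wait_ahash(int ret,
					     struct mv_cesa_ahash_result *result)
{
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result->completion);

	return result->error;
}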
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		kfree_sensitive(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
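/*
 * Illustrative sketch (not driver code): the standard HMAC key schedule
 * built above. A key longer than the block size is first hashed down to
 * digest size; the zero-padded key is then XORed with 0x36 to form the
 * inner pad and with 0x5c to form the outer pad.
 */
static inline void mv_cesa_example_hmac_pads(u8 *ipad, u8 *opad,
					     unsigned int blocksize)
{
	unsigned int i;

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;	/* 0x36 */
		opad[i] ^= HMAC_OPAD_VALUE;	/* 0x5c */
	}
}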
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));

	return 0;
}
static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = cpu_to_be32(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};