/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 alg;
	u32 digest;

	u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
	bool last_req;
	bool finish;
	bool hmac;
	bool needs_inv;

	int nents;

	u8 state_sz;	/* expected state size, only set once */
	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

	u64 len;
	u64 processed;

	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct safexcel_ahash_export_state {
	u64 len;
	u64 processed;

	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
	u8 cache[SHA256_BLOCK_SIZE];
};

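/*
 * Build the two instruction tokens attached to a command descriptor: the
 * first one hashes the input data, the second one inserts the resulting
 * digest into the output packet.
 */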
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length)
{
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[1].packet_length = result_length;
	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}

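/*
 * Program the control words of the first command descriptor: algorithm,
 * digest type and context size, and either restart the hash or reload the
 * intermediate state (and block counter) left by a previous request.
 */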
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc,
				     unsigned int digestsize,
				     unsigned int blocksize)
{
	int i;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
	cdesc->control_data.control0 |= ctx->alg;
	cdesc->control_data.control0 |= ctx->digest;

	if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
		if (req->processed) {
			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
		} else {
			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
		}

		if (!req->finish)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

		/*
		 * Copy the input digest if needed, and setup the context
		 * fields. Do this now as we need it to setup the first command
		 * descriptor.
		 */
		if (req->processed) {
			for (i = 0; i < digestsize / sizeof(u32); i++)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

			if (req->finish)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
		}
	} else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);

		memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
		memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
		       ctx->opad, digestsize);
	}
}

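/*
 * Completion path for a regular hash request: pop the result descriptor,
 * report any engine error, copy the digest back to the caller once the
 * request is finished and carry the partial cache over to the next send().
 */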
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	int cache_len;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		*ret = -EINVAL;
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (sreq->finish)
		memcpy(areq->result, sreq->state,
		       crypto_ahash_digestsize(ahash));

	if (sreq->nents) {
		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
		sreq->nents = 0;
	}

	safexcel_free_context(priv, async, sreq->state_sz);

	cache_len = sreq->len - sreq->processed;
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}

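/*
 * Build the descriptor chain for one hash request: a first command
 * descriptor for any data cached by a previous update(), one command
 * descriptor per source scatterlist entry, the hash token and a result
 * descriptor pointing at the state buffer.
 */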
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
				   struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

	queued = len = req->len - req->processed;
	if (queued < crypto_ahash_blocksize(ahash))
		cache_len = queued;
	else
		cache_len = queued - areq->nbytes;

	if (!req->last_req) {
		/* If this is not the last request and the queued data does not
		 * fit into full blocks, cache it for the next send() call.
		 */
		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
		if (!extra)
			/* If this is not the last request and the queued data
			 * is a multiple of a block, cache the last one for now.
			 */
			extra = queued - crypto_ahash_blocksize(ahash);

		if (extra) {
			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req->cache_next, extra,
					   areq->nbytes - extra);

			queued -= extra;
			len -= extra;

			if (!queued) {
				*commands = 0;
				*results = 0;
				return 0;
			}
		}
	}

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Add a command descriptor for the cached data, if any */
	if (cache_len) {
		ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
		if (!ctx->base.cache) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(ctx->base.cache, req->cache, cache_len);
		ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
						     cache_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
			ret = -EINVAL;
			goto free_cache;
		}

		ctx->base.cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
						 (cache_len == len),
						 ctx->base.cache_dma,
						 cache_len, len,
						 ctx->base.ctxr_dma);
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);
			goto unmap_cache;
		}
		n_cdesc++;

		queued -= cache_len;
		if (!queued)
			goto send_command;
	}

	/* Now handle the current ahash request buffer(s) */
	req->nents = dma_map_sg(priv->dev, areq->src,
				sg_nents_for_len(areq->src, areq->nbytes),
				DMA_TO_DEVICE);
	if (!req->nents) {
		ret = -ENOMEM;
		goto cdesc_rollback;
	}

	for_each_sg(areq->src, sg, req->nents, i) {
		int sglen = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - sglen < 0)
			sglen = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - sglen), sg_dma_address(sg),
					   sglen, len, ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1)
			first_cdesc = cdesc;

		queued -= sglen;
		if (!queued)
			break;
	}

send_command:
	/* Setup the context options */
	safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
				 crypto_ahash_blocksize(ahash));

	/* Add the token */
	safexcel_hash_token(first_cdesc, len, req->state_sz);

	ctx->base.result_dma = dma_map_single(priv->dev, req->state,
					      req->state_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
		ret = -EINVAL;
		goto cdesc_rollback;
	}

	/* Add a result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
				   req->state_sz);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	req->processed += len;
	request->req = &areq->base;

	*commands = n_cdesc;
	*results = 1;
	return 0;

cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
	if (ctx->base.cache_dma) {
		dma_unmap_single(priv->dev, ctx->base.cache_dma,
				 ctx->base.cache_sz, DMA_TO_DEVICE);
		ctx->base.cache_sz = 0;
	}
free_cache:
	kfree(ctx->base.cache);
	ctx->base.cache = NULL;

unlock:
	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}

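/*
 * Compare the intermediate state kept in the context record with the one
 * tracked in software; a mismatch means the record was reused and must be
 * invalidated before this request can be processed.
 */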
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	unsigned int state_w_sz = req->state_sz / sizeof(u32);
	int i;

	for (i = 0; i < state_w_sz; i++)
		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
			return true;

	if (ctx->base.ctxr->data[state_w_sz] !=
	    cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
		return true;

	return false;
}

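/*
 * Completion path for a context invalidation: free the context record when
 * the tfm is going away, otherwise re-enqueue the original request on a
 * (possibly different) ring now that the old context has been flushed.
 */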
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	int enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: invalidate: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		*ret = -EINVAL;
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;
		return 1;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	*should_complete = false;

	return 1;
}

static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int err;

	BUG_ON(priv->version == EIP97 && req->needs_inv);

	if (req->needs_inv) {
		req->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}

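/*
 * Queue a single invalidation command descriptor for this context instead
 * of a regular hash operation.
 */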
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
				   int ring, struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	int ret;

	ret = safexcel_invalidate_cache(async, ctx->priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}

static int safexcel_ahash_send(struct crypto_async_request *async,
			       int ring, struct safexcel_request *request,
			       int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int ret;

	if (req->needs_inv)
		ret = safexcel_ahash_send_inv(async, ring, request,
					      commands, results);
	else
		ret = safexcel_ahash_send_req(async, ring, request,
					      commands, results);
	return ret;
}

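/*
 * Synchronously invalidate the context record when the tfm is destroyed,
 * using an on-stack request that only carries the invalidation flag.
 */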
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct ahash_request));

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	rctx->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion_interruptible(&result.completion);

	if (result.error) {
		dev_warn(priv->dev, "hash: completion error (%d)\n",
			 result.error);
		return result.error;
	}

	return 0;
}

/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, i.e. when there is at least 1 block size in the pipe.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	int queued, cache_len;

	/* cache_len: everything accepted by the driver but not sent yet,
	 * tot sz handled by update() - last req sz - tot sz handled by send()
	 */
	cache_len = req->len - areq->nbytes - req->processed;
	/* queued: everything accepted by the driver which will be handled by
	 * the next send() calls.
	 * tot sz handled by update() - tot sz handled by send()
	 */
	queued = req->len - req->processed;

	/*
	 * In case there isn't enough bytes to proceed (less than a
	 * block size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,
				   areq->nbytes, 0);
		return areq->nbytes;
	}

	/* We couldn't cache all the data */
	return -E2BIG;
}

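/*
 * Allocate the per-tfm context record on first use, decide whether it must
 * be invalidated first, then queue the request on the context's ring and
 * kick the ring worker.
 */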
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	req->needs_inv = false;

	if (ctx->base.ctxr) {
		if (priv->version == EIP197 &&
		    !ctx->base.needs_inv && req->processed &&
		    ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
			/* We're still setting needs_inv here, even though it is
			 * cleared right away, because the needs_inv flag can be
			 * set in other functions and we want to keep the same
			 * behaviour.
			 */
			ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

		if (ctx->base.needs_inv) {
			ctx->base.needs_inv = false;
			req->needs_inv = true;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}

static int safexcel_ahash_update(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	/* If the request is 0 length, do nothing */
	if (!areq->nbytes)
		return 0;

	req->len += areq->nbytes;

	safexcel_ahash_cache(areq);

	/*
	 * We're not doing partial updates when performing an hmac request.
	 * Everything will be handled by the final() call.
	 */
	if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
		return 0;

	if (req->hmac)
		return safexcel_ahash_enqueue(areq);

	if (!req->last_req &&
	    req->len - req->processed > crypto_ahash_blocksize(ahash))
		return safexcel_ahash_enqueue(areq);

	return 0;
}

static int safexcel_ahash_final(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->last_req = true;
	req->finish = true;

	/* If we have an overall 0 length request */
	if (!(req->len + areq->nbytes)) {
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);

		return 0;
	}

	return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	req->last_req = true;
	req->finish = true;

	safexcel_ahash_update(areq);
	return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_export_state *export = out;

	export->len = req->len;
	export->processed = req->processed;

	memcpy(export->state, req->state, req->state_sz);
	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

	return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	const struct safexcel_ahash_export_state *export = in;
	int ret;

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req->len = export->len;
	req->processed = export->processed;

	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
	memcpy(req->state, export->state, req->state_sz);

	return 0;
}

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_ahash_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));
	return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA1_H0;
	req->state[1] = SHA1_H1;
	req->state[2] = SHA1_H2;
	req->state[3] = SHA1_H3;
	req->state[4] = SHA1_H4;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	if (priv->version == EIP197) {
		ret = safexcel_ahash_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}

struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	safexcel_sha1_init(areq);
	ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
	struct completion completion;
	int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

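/*
 * Derive the HMAC inner and outer pads from the key: hash the key first if
 * it is longer than a block, zero-pad it to the block size, then XOR it
 * with the ipad/opad constants.
 */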
static int safexcel_hmac_init_pad(struct ahash_request *areq,
				  unsigned int blocksize, const u8 *key,
				  unsigned int keylen, u8 *ipad, u8 *opad)
{
	struct safexcel_ahash_result result;
	struct scatterlist sg;
	int ret, i;
	u8 *keydup;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   safexcel_ahash_complete, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(areq, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(areq);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Avoid leaking */
		memzero_explicit(keydup, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}

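/*
 * Run one block of pad data through the hash and export the resulting
 * intermediate state, which is later loaded into the engine context as the
 * HMAC precomputed digest.
 */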
static int safexcel_hmac_init_iv(struct ahash_request *areq,
				 unsigned int blocksize, u8 *pad, void *state)
{
	struct safexcel_ahash_result result;
	struct safexcel_ahash_req *req;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(areq, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req = ahash_request_ctx(areq);
	req->hmac = true;
	req->last_req = true;

	ret = crypto_ahash_update(areq);
	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	return crypto_ahash_export(areq, state);
}

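/*
 * Compute the HMAC istate/ostate for a given key by driving the requested
 * "safexcel-*" ahash implementation itself over the inner and outer pads.
 */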
static int safexcel_hmac_setkey(const char *alg, const u8 *key,
				unsigned int keylen, void *istate, void *ostate)
{
	struct ahash_request *areq;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad, *opad;
	int ret;

	tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	areq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!areq) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_request;
	}

	opad = ipad + blocksize;

	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
	kfree(ipad);
free_request:
	ahash_request_free(areq);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

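/*
 * Install a new HMAC-SHA1 key: recompute the precomputed inner/outer
 * digests and, on EIP197, flag the context for invalidation if they
 * changed while a context record is still live.
 */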
static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_ahash_export_state istate, ostate;
	int ret, i;

	ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	if (priv->version == EIP197 && ctx->base.ctxr) {
		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
			if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
			    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);

	return 0;
}

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha1_digest,
		.setkey = safexcel_hmac_sha1_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "safexcel-hmac-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA256_H0;
	req->state[1] = SHA256_H1;
	req->state[2] = SHA256_H2;
	req->state[3] = SHA256_H3;
	req->state[4] = SHA256_H4;
	req->state[5] = SHA256_H5;
	req->state[6] = SHA256_H6;
	req->state[7] = SHA256_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA224_H0;
	req->state[1] = SHA224_H1;
	req->state[2] = SHA224_H2;
	req->state[3] = SHA224_H3;
	req->state[4] = SHA224_H4;
	req->state[5] = SHA224_H5;
	req->state[6] = SHA224_H6;
	req->state[7] = SHA224_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};