/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"
struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 alg;

	u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_ahash_req {
	bool last_req;
	bool finish;
	bool hmac;
	bool needs_inv;

	int nents;
	dma_addr_t result_dma;

	u32 digest;

	u8 state_sz;	/* expected state size, only set once */
	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

	u64 len;
	u64 processed;

	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
	dma_addr_t cache_dma;
	unsigned int cache_sz;

	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};
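
/* Build the instruction token for a hash operation: token[0] directs
 * 'input_length' bytes of packet data into the hash engine, token[1] inserts
 * the 'result_length' byte digest into the result buffer.
 */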
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length)
{
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[1].packet_length = result_length;
	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
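
/* Fill in the command descriptor control words: select the hash algorithm and
 * digest type, and populate the context record with either the saved
 * intermediate state (precomputed digests) or the ipad/opad precomputes (HMAC).
 */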
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc,
				     unsigned int digestsize,
				     unsigned int blocksize)
{
	int i;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
	cdesc->control_data.control0 |= ctx->alg;
	cdesc->control_data.control0 |= req->digest;

	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
		if (req->processed) {
			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
		} else {
			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
		}

		if (!req->finish)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

		/*
		 * Copy the input digest if needed, and setup the context
		 * fields. Do this now as we need it to setup the first command
		 * descriptor.
		 */
		if (req->processed) {
			for (i = 0; i < digestsize / sizeof(u32); i++)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

			if (req->finish)
				ctx->base.ctxr->data[i] =
					cpu_to_le32(req->processed / blocksize);
		}
	} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));

		memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
		memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
		       ctx->opad, req->state_sz);
	}
}
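
/* Completion path for a regular hash request: fetch and check the result
 * descriptor, undo the DMA mappings, copy the digest back to the caller and
 * keep any leftover bytes in the cache for the next chunk.
 */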
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	int cache_len;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else {
		*ret = safexcel_rdesc_check_errors(priv, rdesc);
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);

	if (sreq->result_dma) {
		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
				 DMA_FROM_DEVICE);
		sreq->result_dma = 0;
	}

	if (sreq->cache_dma) {
		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
				 DMA_TO_DEVICE);
		sreq->cache_dma = 0;
	}

	if (sreq->finish)
		memcpy(areq->result, sreq->state,
		       crypto_ahash_digestsize(ahash));

	cache_len = sreq->len - sreq->processed;
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}
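
/* Translate one ahash request into ring descriptors: an optional command
 * descriptor for previously cached bytes, one command descriptor per source
 * scatterlist entry, and a single result descriptor pointing at req->state.
 */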
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
				   struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

	queued = len = req->len - req->processed;
	if (queued <= crypto_ahash_blocksize(ahash))
		cache_len = queued;
	else
		cache_len = queued - areq->nbytes;

	if (!req->last_req) {
		/* If this is not the last request and the queued data does not
		 * fit into full blocks, cache it for the next send() call.
		 */
		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
		if (!extra)
			/* If this is not the last request and the queued data
			 * is a multiple of a block, cache the last one for now.
			 */
			extra = crypto_ahash_blocksize(ahash);

		if (extra) {
			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req->cache_next, extra,
					   areq->nbytes - extra);

			queued -= extra;
			len -= extra;

			if (!queued) {
				*commands = 0;
				*results = 0;
				return 0;
			}
		}
	}

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Add a command descriptor for the cached data, if any */
	if (cache_len) {
		req->cache_dma = dma_map_single(priv->dev, req->cache,
						cache_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, req->cache_dma)) {
			spin_unlock_bh(&priv->ring[ring].egress_lock);
			return -EINVAL;
		}

		req->cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
						 (cache_len == len),
						 req->cache_dma, cache_len, len,
						 ctx->base.ctxr_dma);
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);
			goto unmap_cache;
		}
		n_cdesc++;

		queued -= cache_len;
		if (!queued)
			goto send_command;
	}

	/* Now handle the current ahash request buffer(s) */
	req->nents = dma_map_sg(priv->dev, areq->src,
				sg_nents_for_len(areq->src, areq->nbytes),
				DMA_TO_DEVICE);
	if (!req->nents) {
		ret = -ENOMEM;
		goto cdesc_rollback;
	}

	for_each_sg(areq->src, sg, req->nents, i) {
		int sglen = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - sglen < 0)
			sglen = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - sglen), sg_dma_address(sg),
					   sglen, len, ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			ret = PTR_ERR(cdesc);
			goto unmap_sg;
		}
		n_cdesc++;

		if (n_cdesc == 1)
			first_cdesc = cdesc;

		queued -= sglen;
		if (!queued)
			break;
	}

send_command:
	/* Setup the context options */
	safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
				 crypto_ahash_blocksize(ahash));

	/* Add the token */
	safexcel_hash_token(first_cdesc, len, req->state_sz);

	req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, req->result_dma)) {
		ret = -EINVAL;
		goto unmap_sg;
	}

	/* Add a result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
				   req->state_sz);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto unmap_result;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	req->processed += len;
	request->req = &areq->base;

	*commands = n_cdesc;
	*results = 1;
	return 0;

unmap_result:
	dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
			 DMA_FROM_DEVICE);
unmap_sg:
	dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
	if (req->cache_dma) {
		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
				 DMA_TO_DEVICE);
		req->cache_sz = 0;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}
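
/* The context record can only be reused if it still holds this request's
 * state: compare the saved state words and the processed block counter to
 * decide whether an invalidation command must be sent first.
 */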
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	unsigned int state_w_sz = req->state_sz / sizeof(u32);
	int i;

	for (i = 0; i < state_w_sz; i++)
		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
			return true;

	if (ctx->base.ctxr->data[state_w_sz] !=
	    cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
		return true;

	return false;
}
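
/* Completion path for a context invalidation command: when the tfm is being
 * torn down the context record is freed, otherwise the original request is
 * re-queued on a (possibly different) ring.
 */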
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	int enq_ret;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: invalidate: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;
		return 1;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	*should_complete = false;

	return 1;
}
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int err;

	BUG_ON(priv->version == EIP97 && req->needs_inv);

	if (req->needs_inv) {
		req->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}
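
/* Queue a cache invalidation command for this transform's context record. */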
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
				   int ring, struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	int ret;

	ret = safexcel_invalidate_cache(async, ctx->priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}
static int safexcel_ahash_send(struct crypto_async_request *async,
			       int ring, struct safexcel_request *request,
			       int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int ret;

	if (req->needs_inv)
		ret = safexcel_ahash_send_inv(async, ring, request,
					      commands, results);
	else
		ret = safexcel_ahash_send_req(async, ring, request,
					      commands, results);

	return ret;
}
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct ahash_request));

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	rctx->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion(&result.completion);

	if (result.error) {
		dev_warn(priv->dev, "hash: completion error (%d)\n",
			 result.error);
		return result.error;
	}

	return 0;
}
/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, aka. when there is at least 1 block size in the pipe.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	int queued, cache_len;

	/* cache_len: everything accepted by the driver but not sent yet,
	 * tot sz handled by update() - last req sz - tot sz handled by send()
	 */
	cache_len = req->len - areq->nbytes - req->processed;
	/* queued: everything accepted by the driver which will be handled by
	 * the next send() calls.
	 * tot sz handled by update() - tot sz handled by send()
	 */
	queued = req->len - req->processed;

	/*
	 * In case there aren't enough bytes to proceed (less than a
	 * block size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,
				   areq->nbytes, 0);
		return areq->nbytes;
	}

	/* We couldn't cache all the data */
	return 0;
}
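
/* Hand a request over to the engine: pick (or keep) a ring, allocate the
 * per-tfm context record on first use, enqueue the request and kick the ring
 * worker.
 */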
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	req->needs_inv = false;

	if (ctx->base.ctxr) {
		if (priv->version == EIP197 &&
		    !ctx->base.needs_inv && req->processed &&
		    req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
			/* We're still setting needs_inv here, even though it is
			 * cleared right away, because the needs_inv flag can be
			 * set in other functions and we want to keep the same
			 * behaviour.
			 */
			ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

		if (ctx->base.needs_inv) {
			ctx->base.needs_inv = false;
			req->needs_inv = true;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}
static int safexcel_ahash_update(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	/* If the request is 0 length, do nothing */
	if (!areq->nbytes)
		return 0;

	req->len += areq->nbytes;

	safexcel_ahash_cache(areq);

	/*
	 * We're not doing partial updates when performing an HMAC request.
	 * Everything will be handled by the final() call.
	 */
	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
		return 0;

	if (req->hmac)
		return safexcel_ahash_enqueue(areq);

	if (!req->last_req &&
	    req->len - req->processed > crypto_ahash_blocksize(ahash))
		return safexcel_ahash_enqueue(areq);

	return 0;
}
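
/* final(): mark the request as finished; overall zero-length messages are
 * answered directly with the well-known empty-message digests, everything
 * else goes through the engine.
 */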
static int safexcel_ahash_final(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->last_req = true;
	req->finish = true;

	/* If we have an overall 0 length request */
	if (!(req->len + areq->nbytes)) {
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);

		return 0;
	}

	return safexcel_ahash_enqueue(areq);
}
static int safexcel_ahash_finup(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	req->last_req = true;
	req->finish = true;

	safexcel_ahash_update(areq);
	return safexcel_ahash_final(areq);
}
static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_export_state *export = out;

	export->len = req->len;
	export->processed = req->processed;

	export->digest = req->digest;

	memcpy(export->state, req->state, req->state_sz);
	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

	return 0;
}
static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	const struct safexcel_ahash_export_state *export = in;
	int ret;

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req->len = export->len;
	req->processed = export->processed;

	req->digest = export->digest;

	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
	memcpy(req->state, export->state, req->state_sz);

	return 0;
}
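
/* Per-transform setup: hook up the send/handle_result backend callbacks and
 * reserve room for struct safexcel_ahash_req in every ahash request.
 */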
static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_ahash_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));
	return 0;
}
static int safexcel_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA1_H0;
	req->state[1] = SHA1_H1;
	req->state[2] = SHA1_H2;
	req->state[3] = SHA1_H3;
	req->state[4] = SHA1_H4;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;

	return 0;
}
static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	if (priv->version == EIP197) {
		ret = safexcel_ahash_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}
struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
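
/* hmac(sha1) reuses the sha1 request flow; only the digest type is switched
 * to CONTEXT_CONTROL_DIGEST_HMAC so the ipad/opad precomputes get used.
 */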
static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha1_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}
static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_ahash_result {
	struct completion completion;
	int error;
};
static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
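
/* Derive the HMAC inner and outer pads: keys longer than a block are first
 * digested, the key is then zero-padded to the block size and XORed with the
 * HMAC_IPAD_VALUE/HMAC_OPAD_VALUE constants.
 */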
static int safexcel_hmac_init_pad(struct ahash_request *areq,
				  unsigned int blocksize, const u8 *key,
				  unsigned int keylen, u8 *ipad, u8 *opad)
{
	struct safexcel_ahash_result result;
	struct scatterlist sg;
	int ret, i;
	u8 *keydup;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   safexcel_ahash_complete, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(areq, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(areq);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Avoid leaking */
		memzero_explicit(keydup, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
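
/* Run a single padded block through init()+update() and export the resulting
 * intermediate state; this is the precompute later loaded into the context
 * record for HMAC requests.
 */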
static int safexcel_hmac_init_iv(struct ahash_request *areq,
				 unsigned int blocksize, u8 *pad, void *state)
{
	struct safexcel_ahash_result result;
	struct safexcel_ahash_req *req;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(areq, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req = ahash_request_ctx(areq);
	req->hmac = true;
	req->last_req = true;

	ret = crypto_ahash_update(areq);
	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	return crypto_ahash_export(areq, state);
}
int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
			 void *istate, void *ostate)
{
	struct ahash_request *areq;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad, *opad;
	int ret;

	tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	areq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!areq) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_request;
	}

	opad = ipad + blocksize;

	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
	kfree(ipad);
free_request:
	ahash_request_free(areq);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
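
/* setkey helper shared by all hmac(shaX) algorithms: derive the ipad/opad
 * precomputes using the matching safexcel-shaX ahash and, on EIP197, flag the
 * context record for invalidation when the key actually changed.
 */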
static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen, const char *alg,
				    unsigned int state_sz)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_ahash_export_state istate, ostate;
	int ret, i;

	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	if (priv->version == EIP197 && ctx->base.ctxr) {
		for (i = 0; i < state_sz / sizeof(u32); i++) {
			if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
			    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	memcpy(ctx->ipad, &istate.state, state_sz);
	memcpy(ctx->opad, &ostate.state, state_sz);

	return 0;
}
static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
					SHA1_DIGEST_SIZE);
}
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha1_digest,
		.setkey = safexcel_hmac_sha1_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "safexcel-hmac-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA256_H0;
	req->state[1] = SHA256_H1;
	req->state[2] = SHA256_H2;
	req->state[3] = SHA256_H3;
	req->state[4] = SHA256_H4;
	req->state[5] = SHA256_H5;
	req->state[6] = SHA256_H6;
	req->state[7] = SHA256_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}
static int safexcel_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA224_H0;
	req->state[1] = SHA224_H1;
	req->state[2] = SHA224_H2;
	req->state[3] = SHA224_H3;
	req->state[4] = SHA224_H4;
	req->state[5] = SHA224_H5;
	req->state[6] = SHA224_H6;
	req->state[7] = SHA224_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}
static int safexcel_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
					SHA256_DIGEST_SIZE);
}
static int safexcel_hmac_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha224_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}
static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha224_digest,
		.setkey = safexcel_hmac_sha224_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "safexcel-hmac-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
					SHA256_DIGEST_SIZE);
}
static int safexcel_hmac_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha256_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}
static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha256_digest,
		.setkey = safexcel_hmac_sha256_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "safexcel-hmac-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};