/*
 * Multi buffer SHA512 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *  Copyright(c) 2016 Intel Corporation.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  Contact Information:
 *	Megha Dey <megha.dey@linux.intel.com>
 *
 *  BSD LICENSE
 *
 *  Copyright(c) 2016 Intel Corporation.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *    * Neither the name of Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
#include "sha512_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */
static struct mcryptd_alg_state sha512_mb_alg_state;

struct sha512_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};
static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx)
{
	struct ahash_request *areq;

	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}
static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}
static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct ahash_request *areq)
{
	rctx->flag = HASH_UPDATE;
}
static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state);
static asmlinkage struct job_sha512* (*sha512_job_mgr_submit)
					(struct sha512_mb_mgr *state,
					 struct job_sha512 *job);
static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
					(struct sha512_mb_mgr *state);
static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
					(struct sha512_mb_mgr *state);
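
/*
 * Pad the final (partial) block in place and append the big-endian
 * bit-length field; the return value is how many extra blocks (one or
 * two) still need to be run through the hash engine.
 */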
inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
			 uint64_t total_len)
{
	uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA512_BLOCK_SIZE);
	padblock[i] = 0x80;

	/* Move i to the end of either 1st or 2nd extra block after incoming */
	i += ((SHA512_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA512_PADLENGTHFIELD_SIZE;

#if SHA512_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA512_LOG2_BLOCK_SIZE;
}
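
/*
 * Drive a context through the job manager until either a job is ready to
 * be returned to the caller or everything still in flight needs more
 * processing (in which case NULL is returned).
 */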
static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
		(struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA512_BLOCK_SIZE - 1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA512_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA512_LOG2_BLOCK_SIZE;

			if (len) {

				ctx->job.len = len;
				ctx->job.buffer = (uint8_t *) buffer;

				ctx = (struct sha512_hash_ctx *)
					sha512_job_mgr_submit(&mgr->mgr,
							      &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {

			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks =
					sha512_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		if (ctx)
			ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}
static struct sha512_hash_ctx
		*sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return to
	 * the user. If it is not ready, resubmit the job to finish processing.
	 * If sha512_ctx_mgr_resubmit returned a job, it is ready to be
	 * returned. Otherwise, all jobs currently being managed by the
	 * hash_ctx_mgr still need processing.
	 */
	struct sha512_ctx_mgr *mgr;
	struct sha512_hash_ctx *ctx;
	unsigned long flags;

	mgr = cstate->mgr;
	spin_lock_irqsave(&cstate->work_lock, flags);
	ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_get_comp_job(&mgr->mgr);
	ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
	spin_unlock_irqrestore(&cstate->work_lock, flags);
	return ctx;
}
static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
{
	sha512_job_mgr_init(&mgr->mgr);
}
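
/*
 * Queue a user buffer for hashing under the per-cpu manager lock.  Whole
 * blocks go straight to the job manager; any tail shorter than
 * SHA512_BLOCK_SIZE is staged in the extra-block buffer until it fills.
 */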
static struct sha512_hash_ctx
		*sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate,
					struct sha512_hash_ctx *ctx,
					const void *buffer,
					uint32_t len,
					int flags)
{
	struct sha512_ctx_mgr *mgr;
	unsigned long irqflags;

	mgr = cstate->mgr;
	spin_lock_irqsave(&cstate->work_lock, irqflags);
	if (flags & ~(HASH_UPDATE | HASH_LAST)) {
		/* User should not pass anything other than UPDATE or LAST */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		goto unlock;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		goto unlock;
	}

	if (ctx->status & HASH_CTX_STS_COMPLETE) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		goto unlock;
	}

	/*
	 * If we made it here, there were no errors during this call to
	 * submit
	 */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/*
	 * Store the user's request flags and mark this ctx as currently being
	 * processed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) {
		/*
		 * Compute how many bytes to copy from user buffer into extra
		 * block
		 */
		uint32_t copy_len = SHA512_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
				buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/*
		 * The extra block should never contain more than 1 block
		 * here
		 */
		assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE);

		/*
		 * If the extra block buffer contains exactly 1 block, it can
		 * be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;

			ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
unlock:
	spin_unlock_irqrestore(&cstate->work_lock, irqflags);
	return ctx;
}
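
/*
 * Force completion of outstanding work: keep asking the job manager to
 * flush until it either runs dry or hands back a context that is ready
 * for the caller.
 */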
static struct sha512_hash_ctx
		*sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate)
{
	struct sha512_ctx_mgr *mgr;
	struct sha512_hash_ctx *ctx;
	unsigned long flags;

	mgr = cstate->mgr;
	spin_lock_irqsave(&cstate->work_lock, flags);
	while (1) {
		ctx = (struct sha512_hash_ctx *)
					sha512_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			break;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha512_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha512_ctx_mgr_resubmit returned a job, it is ready to
		 * be returned. Otherwise, all jobs currently being managed by
		 * the sha512_ctx_mgr still need processing. Loop.
		 */
		if (ctx)
			break;
	}
	spin_unlock_irqrestore(&cstate->work_lock, flags);
	return ctx;
}
static int sha512_mb_init(struct ahash_request *areq)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	sctx->job.result_digest[0] = SHA512_H0;
	sctx->job.result_digest[1] = SHA512_H1;
	sctx->job.result_digest[2] = SHA512_H2;
	sctx->job.result_digest[3] = SHA512_H3;
	sctx->job.result_digest[4] = SHA512_H4;
	sctx->job.result_digest[5] = SHA512_H5;
	sctx->job.result_digest[6] = SHA512_H6;
	sctx->job.result_digest[7] = SHA512_H7;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}
static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int i;
	struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
	__be64 *dst = (__be64 *) rctx->out;

	for (i = 0; i < 8; ++i)
		dst[i] = cpu_to_be64(sctx->job.result_digest[i]);

	return 0;
}
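
/*
 * Walk the remaining scatterlist data of a request and feed it to the
 * context manager, optionally flushing, until the request is done; copy
 * out the digest if this was a final/finup request.
 */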
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			 struct mcryptd_alg_cstate *cstate,
			 bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha512_hash_ctx *sha_ctx;

	/* more data to hash? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}

		sha_ctx = (struct sha512_hash_ctx *)
						ahash_request_ctx(&rctx->areq);
		kernel_fpu_begin();
		sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx,
						rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha512_ctx_mgr_flush(cstate);
		}
		kernel_fpu_end();
		if (!sha_ctx) {
			err = -EINVAL;
			goto out;
		}

		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		nbytes = sha_ctx->error;
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha512_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}
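
/*
 * Complete one request (removing it from the per-cpu work list and
 * calling its completion), then drain and complete any other jobs the
 * context manager reports as finished.
 */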
static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret = 0;
	unsigned long flags;

	/* remove from work list */
	spin_lock_irqsave(&cstate->work_lock, flags);
	list_del(&rctx->waiter);
	spin_unlock_irqrestore(&cstate->work_lock, flags);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock_irqsave(&cstate->work_lock, flags);
			list_del(&req_ctx->waiter);
			spin_unlock_irqrestore(&cstate->work_lock, flags);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				req_ctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
	}

	return 0;
}
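
/*
 * Tag a request with its arrival time and sequence number, add it to the
 * per-cpu work list and arm the flusher so it is not left sitting in the
 * multi-buffer lanes for longer than FLUSH_INTERVAL.
 */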
static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
				struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
	unsigned long flags;

	/* initialize tag */
	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock_irqsave(&cstate->work_lock, flags);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock_irqrestore(&cstate->work_lock, flags);

	mcryptd_arm_flusher(cstate, delay);
}
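
/*
 * mcryptd worker entry points: update/finup/final run on the CPU that
 * queued the request, walk the source data and submit it to the per-cpu
 * context manager.
 */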
static int sha512_mb_update(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
									areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	sha512_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
							nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);

	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
static int sha512_mb_finup(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
									areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	sha512_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
								nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
static int sha512_mb_final(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
									areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct sha512_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	/* flag HASH_FINAL and 0 data size */
	sha512_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
static int sha512_mb_export(struct ahash_request *areq, void *out)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}
static int sha512_mb_import(struct ahash_request *areq, const void *in)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}
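
/*
 * The outer "sha512_mb" algorithm is a thin mcryptd wrapper: allocate the
 * internal "__intel_sha512-mb" helper transform and size the request
 * context so the mcryptd request can live inside it.
 */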
static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb",
						CRYPTO_ALG_INTERNAL,
						CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha512_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				sizeof(struct ahash_request) +
				crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}
static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}
static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				sizeof(struct ahash_request) +
				sizeof(struct sha512_hash_ctx));

	return 0;
}
static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}
static struct ahash_alg sha512_mb_areq_alg = {
	.init		= sha512_mb_init,
	.update		= sha512_mb_update,
	.final		= sha512_mb_final,
	.finup		= sha512_mb_finup,
	.export		= sha512_mb_export,
	.import		= sha512_mb_import,
	.halg		= {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct sha512_hash_ctx),
		.base		= {
			.cra_name	 = "__sha512-mb",
			.cra_driver_name = "__intel_sha512-mb",
			/*
			 * use ASYNC flag as some buffers in multi-buffer
			 * algo may not have completed before the hashing
			 * thread sleeps
			 */
			.cra_flags	= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_INTERNAL,
			.cra_blocksize	= SHA512_BLOCK_SIZE,
			.cra_module	= THIS_MODULE,
			.cra_list	= LIST_HEAD_INIT
				(sha512_mb_areq_alg.halg.base.cra_list),
			.cra_init	= sha512_mb_areq_init_tfm,
			.cra_exit	= sha512_mb_areq_exit_tfm,
			.cra_ctxsize	= sizeof(struct sha512_hash_ctx),
		}
	}
};
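
/*
 * Async entry points seen by users of "sha512_mb": copy the request,
 * retarget it at the mcryptd transform and let mcryptd dispatch it to
 * the multi-buffer worker.
 */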
static int sha512_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}
static int sha512_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}
static int sha512_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}
static int sha512_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}
static int sha512_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}
static int sha512_mb_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_export(mcryptd_req, out);
}
static int sha512_mb_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
	struct mcryptd_hash_request_ctx *rctx;
	struct ahash_request *areq;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	rctx = ahash_request_ctx(mcryptd_req);
	areq = &rctx->areq;

	ahash_request_set_tfm(areq, child);
	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
					rctx->complete, req);

	return crypto_ahash_import(mcryptd_req, in);
}
static struct ahash_alg sha512_mb_async_alg = {
	.init		= sha512_mb_async_init,
	.update		= sha512_mb_async_update,
	.final		= sha512_mb_async_final,
	.finup		= sha512_mb_async_finup,
	.digest		= sha512_mb_async_digest,
	.export		= sha512_mb_async_export,
	.import		= sha512_mb_async_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct sha512_hash_ctx),
		.base = {
			.cra_name		= "sha512",
			.cra_driver_name	= "sha512_mb",
			/*
			 * Low priority, since with few concurrent hash requests
			 * this is extremely slow due to the flush delay.  Users
			 * whose workloads would benefit from this can request
			 * it explicitly by driver name, or can increase its
			 * priority at runtime using NETLINK_CRYPTO.
			 */
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_module		= THIS_MODULE,
			.cra_list		= LIST_HEAD_INIT
				(sha512_mb_async_alg.halg.base.cra_list),
			.cra_init		= sha512_mb_async_init_tfm,
			.cra_exit		= sha512_mb_async_exit_tfm,
			.cra_ctxsize		= sizeof(struct sha512_mb_ctx),
		}
	}
};
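
/*
 * Periodic flusher: complete every queued request whose deadline has
 * passed, then re-arm the timer for the next pending expiry, which is
 * also returned to the caller.
 */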
static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha512_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha512_hash_ctx *)
					sha512_ctx_mgr_flush(cstate);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha512_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}
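
/*
 * Module init: require AVX2/BMI2, hook up the AVX2 job-manager routines,
 * set up the per-cpu state and flusher, and register both the internal
 * helper and the user-visible "sha512_mb" algorithm.
 */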
static int __init sha512_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha512_mb_alg_state.alg_cstate =
				alloc_percpu(struct mcryptd_alg_cstate);

	sha512_job_mgr_init = sha512_mb_mgr_init_avx2;
	sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2;
	sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2;
	sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2;

	if (!sha512_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha512_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr),
					GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha512_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha512_mb_alg_state.flusher = &sha512_mb_flusher;

	err = crypto_register_ahash(&sha512_mb_areq_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha512_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_ahash(&sha512_mb_areq_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha512_mb_alg_state.alg_cstate);
	return -ENODEV;
}
static void __exit sha512_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha512_mb_async_alg);
	crypto_unregister_ahash(&sha512_mb_areq_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha512_mb_alg_state.alg_cstate);
}
module_init(sha512_mb_mod_init);
module_exit(sha512_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS("sha512");