/*
 * Multi buffer SHA512 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Megha Dey <megha.dey@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
#include "sha512_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */
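/*
 * Note (added for clarity): FLUSH_INTERVAL bounds how long a submitted job
 * may wait in a partially filled set of multi-buffer lanes.  Each request is
 * tagged with an expiry of arrival + FLUSH_INTERVAL in sha512_mb_add_list(),
 * and the per-cpu flusher (sha512_mb_flusher() below) forces out jobs whose
 * tag has expired.
 */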
static struct mcryptd_alg_state sha512_mb_alg_state;

struct sha512_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};
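/*
 * The sha512_hash_ctx is stored in the __ctx area of an ahash_request that
 * is itself embedded in an mcryptd_hash_request_ctx, so the helpers below
 * can recover the enclosing structures with container_of().
 */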
static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx)
{
	struct ahash_request *areq;

	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}
static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}
static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct ahash_request *areq)
{
	rctx->flag = HASH_UPDATE;
}
static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state);
static asmlinkage struct job_sha512* (*sha512_job_mgr_submit)
						(struct sha512_mb_mgr *state,
						 struct job_sha512 *job);
static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
						(struct sha512_mb_mgr *state);
static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
						(struct sha512_mb_mgr *state);
inline void sha512_init_digest(uint64_t *digest)
{
	static const uint64_t initial_digest[SHA512_DIGEST_LENGTH] = {
					SHA512_H0, SHA512_H1, SHA512_H2,
					SHA512_H3, SHA512_H4, SHA512_H5,
					SHA512_H6, SHA512_H7 };
	memcpy(digest, initial_digest, sizeof(initial_digest));
}
inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
			   uint64_t total_len)
{
	uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA512_BLOCK_SIZE);
	padblock[i] = 0x80;

	i += ((SHA512_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA512_PADLENGTHFIELD_SIZE;

#if SHA512_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA512_LOG2_BLOCK_SIZE;
}
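/*
 * Worked example (added for illustration): with SHA512_BLOCK_SIZE = 128 and
 * SHA512_PADLENGTHFIELD_SIZE = 16, total_len = 200 leaves i = 200 & 127 = 72
 * bytes in the last partial block.  The 0x80 marker, 39 zero bytes and the
 * 16-byte length field bring i to 128, so one extra block is hashed.  For
 * total_len = 120 the marker and length field no longer fit in the same
 * block, i becomes 256, and two extra blocks are hashed.
 */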
static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
		(struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA512_BLOCK_SIZE - 1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA512_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA512_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha512_hash_ctx *)
					sha512_job_mgr_submit(&mgr->mgr,
							      &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {

			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks =
					sha512_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		if (ctx)
			ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}
static struct sha512_hash_ctx
	*sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return to
	 * the user.
	 * If it is not ready, resubmit the job to finish processing.
	 * If sha512_ctx_mgr_resubmit returned a job, it is ready to be
	 * returned.
	 * Otherwise, all jobs currently being managed by the hash_ctx_mgr
	 * still need processing.
	 */
	struct sha512_ctx_mgr *mgr;
	struct sha512_hash_ctx *ctx;
	unsigned long flags;

	mgr = cstate->mgr;
	spin_lock_irqsave(&cstate->work_lock, flags);
	ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_get_comp_job(&mgr->mgr);
	ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
	spin_unlock_irqrestore(&cstate->work_lock, flags);
	return ctx;
}
static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
{
	sha512_job_mgr_init(&mgr->mgr);
}
static struct sha512_hash_ctx
		*sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate,
				       struct sha512_hash_ctx *ctx,
				       const void *buffer,
				       uint32_t len,
				       int flags)
{
	struct sha512_ctx_mgr *mgr;
	unsigned long irqflags;

	mgr = cstate->mgr;
	spin_lock_irqsave(&cstate->work_lock, irqflags);
	if (flags & (~HASH_ENTIRE)) {
		/*
		 * User should not pass anything other than FIRST, UPDATE, or
		 * LAST
		 */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		goto unlock;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		goto unlock;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		goto unlock;
	}

	if (flags & HASH_FIRST) {
		/* Init digest */
		sha512_init_digest(ctx->job.result_digest);

		/* Reset byte counter */
		ctx->total_length = 0;

		/* Clear extra blocks */
		ctx->partial_block_buffer_length = 0;
	}

	/*
	 * If we made it here, there were no errors during this call to
	 * submit
	 */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/*
	 * Store the user's request flags and mark this ctx as currently being
	 * processed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) {
		/*
		 * Compute how many bytes to copy from user buffer into extra
		 * block
		 */
		uint32_t copy_len = SHA512_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[
					ctx->partial_block_buffer_length],
			       buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/*
		 * The extra block should never contain more than 1 block
		 * here
		 */
		assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE);

		/*
		 * If the extra block buffer contains exactly 1 block, it can
		 * be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
unlock:
	spin_unlock_irqrestore(&cstate->work_lock, irqflags);
	return ctx;
}
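/*
 * Summary (added for clarity): a sha512_hash_ctx moves from
 * HASH_CTX_STS_IDLE to HASH_CTX_STS_PROCESSING on submit, additionally
 * carries HASH_CTX_STS_LAST when the caller passes HASH_LAST, and is marked
 * HASH_CTX_STS_COMPLETE once the padding blocks have been hashed.
 * sha512_ctx_mgr_resubmit() above drives these transitions whenever the job
 * manager hands a lane back.
 */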
static struct sha512_hash_ctx
		*sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate)
{
	struct sha512_ctx_mgr *mgr;
	struct sha512_hash_ctx *ctx;
	unsigned long flags;

	mgr = cstate->mgr;
	spin_lock_irqsave(&cstate->work_lock, flags);
	while (1) {
		ctx = (struct sha512_hash_ctx *)
					sha512_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			break;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha512_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha512_ctx_mgr_resubmit returned a job, it is ready to
		 * be returned. Otherwise, all jobs currently being managed by
		 * the sha512_ctx_mgr still need processing. Loop.
		 */
		if (ctx)
			break;
	}
	spin_unlock_irqrestore(&cstate->work_lock, flags);
	return ctx;
}
static int sha512_mb_init(struct ahash_request *areq)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	sctx->job.result_digest[0] = SHA512_H0;
	sctx->job.result_digest[1] = SHA512_H1;
	sctx->job.result_digest[2] = SHA512_H2;
	sctx->job.result_digest[3] = SHA512_H3;
	sctx->job.result_digest[4] = SHA512_H4;
	sctx->job.result_digest[5] = SHA512_H5;
	sctx->job.result_digest[6] = SHA512_H6;
	sctx->job.result_digest[7] = SHA512_H7;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}
static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int i;
	struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
	__be64 *dst = (__be64 *) rctx->out;

	for (i = 0; i < 8; ++i)
		dst[i] = cpu_to_be64(sctx->job.result_digest[i]);

	return 0;
}
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			   struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha512_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}

		sha_ctx = (struct sha512_hash_ctx *)
					ahash_request_ctx(&rctx->areq);
		kernel_fpu_begin();
		sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx,
						rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha512_ctx_mgr_flush(cstate);
		}
		kernel_fpu_end();
		if (sha_ctx)
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha512_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}
static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;
	unsigned long flags;

	/* remove from work list */
	spin_lock_irqsave(&cstate->work_lock, flags);
	list_del(&rctx->waiter);
	spin_unlock_irqrestore(&cstate->work_lock, flags);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock_irqsave(&cstate->work_lock, flags);
			list_del(&req_ctx->waiter);
			spin_unlock_irqrestore(&cstate->work_lock, flags);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				req_ctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
	}

	return 0;
}
static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
			       struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
	unsigned long flags;

	/* initialize tag */
	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock_irqsave(&cstate->work_lock, flags);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock_irqrestore(&cstate->work_lock, flags);

	mcryptd_arm_flusher(cstate, delay);
}
static int sha512_mb_update(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
				     areq);
	struct mcryptd_alg_cstate *cstate =
			this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	sha512_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
					nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
static int sha512_mb_finup(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
				     areq);
	struct mcryptd_alg_cstate *cstate =
			this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	sha512_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
					nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
static int sha512_mb_final(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
				     areq);
	struct mcryptd_alg_cstate *cstate =
			this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct sha512_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	/* flag HASH_FINAL and 0 data size */
	sha512_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
static int sha512_mb_export(struct ahash_request *areq, void *out)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}
static int sha512_mb_import(struct ahash_request *areq, const void *in)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}
static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb",
					  CRYPTO_ALG_INTERNAL,
					  CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha512_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}
static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}
static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 sizeof(struct sha512_hash_ctx));

	return 0;
}
static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}
static struct ahash_alg sha512_mb_areq_alg = {
	.init		=	sha512_mb_init,
	.update		=	sha512_mb_update,
	.final		=	sha512_mb_final,
	.finup		=	sha512_mb_finup,
	.export		=	sha512_mb_export,
	.import		=	sha512_mb_import,
	.halg		=	{
		.digestsize	=	SHA512_DIGEST_SIZE,
		.statesize	=	sizeof(struct sha512_hash_ctx),
		.base		=	{
			.cra_name	 = "__sha512-mb",
			.cra_driver_name = "__intel_sha512-mb",
			/*
			 * use ASYNC flag as some buffers in multi-buffer
			 * algo may not have completed before hashing thread
			 * sleep
			 */
			.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_INTERNAL,
			.cra_blocksize	= SHA512_BLOCK_SIZE,
			.cra_module	= THIS_MODULE,
			.cra_list	= LIST_HEAD_INIT
				(sha512_mb_areq_alg.halg.base.cra_list),
			.cra_init	= sha512_mb_areq_init_tfm,
			.cra_exit	= sha512_mb_areq_exit_tfm,
			.cra_ctxsize	= sizeof(struct sha512_hash_ctx),
		}
	}
};
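/*
 * The "sha512_mb" wrappers below simply mirror the caller's request into the
 * private mcryptd request and forward each operation to the mcryptd daemon,
 * which in turn feeds the internal "__sha512-mb" algorithm above.
 */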
static int sha512_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}
static int sha512_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}
static int sha512_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}
static int sha512_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}
static int sha512_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}
static int sha512_mb_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_export(mcryptd_req, out);
}
static int sha512_mb_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
	struct mcryptd_hash_request_ctx *rctx;
	struct ahash_request *areq;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	rctx = ahash_request_ctx(mcryptd_req);

	areq = &rctx->areq;

	ahash_request_set_tfm(areq, child);
	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req);

	return crypto_ahash_import(mcryptd_req, in);
}
static struct ahash_alg sha512_mb_async_alg = {
	.init		= sha512_mb_async_init,
	.update		= sha512_mb_async_update,
	.final		= sha512_mb_async_final,
	.finup		= sha512_mb_async_finup,
	.digest		= sha512_mb_async_digest,
	.export		= sha512_mb_async_export,
	.import		= sha512_mb_async_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct sha512_hash_ctx),
		.base = {
			.cra_name		= "sha512",
			.cra_driver_name	= "sha512_mb",
			.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						  CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_type		= &crypto_ahash_type,
			.cra_module		= THIS_MODULE,
			.cra_list		= LIST_HEAD_INIT
				(sha512_mb_async_alg.halg.base.cra_list),
			.cra_init		= sha512_mb_async_init_tfm,
			.cra_exit		= sha512_mb_async_exit_tfm,
			.cra_ctxsize		= sizeof(struct sha512_mb_ctx),
		},
	},
};
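/*
 * Minimal usage sketch (added for illustration, not part of the original
 * driver): a kernel caller reaches this code through the generic ahash API,
 * e.g. by requesting "sha512" or "sha512_mb" explicitly.  Error handling and
 * the asynchronous -EINPROGRESS/-EBUSY completion path are omitted; names
 * such as example_done_cb, buf and len are hypothetical.
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 digest[SHA512_DIGEST_SIZE];
 *
 *	tfm = crypto_alloc_ahash("sha512_mb", 0, 0);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   example_done_cb, NULL);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_ahash_digest(req);		// may return -EINPROGRESS
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */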
static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha512_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha512_hash_ctx *)
					sha512_ctx_mgr_flush(cstate);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha512_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}
static int __init sha512_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha512_mb_alg_state.alg_cstate =
				alloc_percpu(struct mcryptd_alg_cstate);

	sha512_job_mgr_init = sha512_mb_mgr_init_avx2;
	sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2;
	sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2;
	sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2;

	if (!sha512_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha512_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr),
					 GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha512_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha512_mb_alg_state.flusher = &sha512_mb_flusher;

	err = crypto_register_ahash(&sha512_mb_areq_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha512_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_ahash(&sha512_mb_areq_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha512_mb_alg_state.alg_cstate);
	return -ENODEV;
}
static void __exit sha512_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha512_mb_async_alg);
	crypto_unregister_ahash(&sha512_mb_areq_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha512_mb_alg_state.alg_cstate);
}
module_init(sha512_mb_mod_init);
module_exit(sha512_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS("sha512");