/*
 * Multi buffer SHA1 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Tim Chen <tim.c.chen@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <asm/xsave.h>
#include <linux/hardirq.h>
#include <asm/fpu-internal.h>
#include "sha_mb_ctx.h"
#define FLUSH_INTERVAL 1000 /* in usec */

static struct mcryptd_alg_state sha1_mb_alg_state;

struct sha1_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};
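
/*
 * Layout note: each SHA1 job context (struct sha1_hash_ctx) lives inside
 * the shash_desc of an mcryptd request, which in turn lives inside an
 * ahash_request.  The two cast helpers below walk back up this nesting
 * with container_of() so a completed job can be mapped to the request
 * that owns it.
 */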
static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
{
	struct shash_desc *desc;

	desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
	return container_of(desc, struct mcryptd_hash_request_ctx, desc);
}
static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}
static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct shash_desc *desc)
{
	rctx->flag = HASH_UPDATE;
}
static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1 *(*sha1_job_mgr_submit)(struct sha1_mb_mgr *state,
							   struct job_sha1 *job);
static asmlinkage struct job_sha1 *(*sha1_job_mgr_flush)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1 *(*sha1_job_mgr_get_comp_job)(struct sha1_mb_mgr *state);
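
/*
 * These job manager hooks are bound to the AVX2 assembly implementations
 * (sha1_mb_mgr_*_avx2) in sha1_mb_mod_init() below, so the rest of the
 * glue code stays independent of the particular SIMD flavour.
 */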
inline void sha1_init_digest(uint32_t *digest)
{
	static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
				SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };

	memcpy(digest, initial_digest, sizeof(initial_digest));
}
inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
			 uint32_t total_len)
{
	uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
	padblock[i] = 0x80;	/* mandatory SHA-1 padding terminator byte */

	/* Round i up to the block boundary that leaves room for the length field */
	i += ((SHA1_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA1_PADLENGTHFIELD_SIZE;

#if SHA1_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA1_LOG2_BLOCK_SIZE;
}
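
/*
 * Worked example (assuming the usual 8-byte length field, i.e.
 * SHA1_PADLENGTHFIELD_SIZE == 8): for total_len = 70, the partial block
 * holds 70 & 63 = 6 message bytes.  After the 0x80 byte, 49 zero bytes
 * and the 8-byte bit-length field, i becomes 64, so exactly one extra
 * 64-byte block (64 >> SHA1_LOG2_BLOCK_SIZE) is hashed to finish the job.
 */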
static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, struct sha1_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA1_BLOCK_SIZE-1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA1_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA1_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr,
										   &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {

			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks = sha1_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		if (ctx)
			ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}
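
/*
 * To summarise the loop above: a job bounced back from the job manager is
 * resubmitted until it either reports COMPLETE (and is returned to the
 * caller), stalls waiting for more user data (status falls back to IDLE),
 * or the manager has nothing ready and NULL propagates out.
 */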
static struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return to the user.
	 * If it is not ready, resubmit the job to finish processing.
	 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
	 * Otherwise, all jobs currently being managed by the hash_ctx_mgr still need processing.
	 */
	struct sha1_hash_ctx *ctx;

	ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
	return sha1_ctx_mgr_resubmit(mgr, ctx);
}
static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
{
	sha1_job_mgr_init(&mgr->mgr);
}
static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
					  struct sha1_hash_ctx *ctx,
					  const void *buffer,
					  uint32_t len,
					  int flags)
{
	if (flags & (~HASH_ENTIRE)) {
		/* User should not pass anything other than FIRST, UPDATE, or LAST */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	if (flags & HASH_FIRST) {
		/* Init digest */
		sha1_init_digest(ctx->job.result_digest);

		/* Reset byte counter */
		ctx->total_length = 0;

		/* Clear extra blocks */
		ctx->partial_block_buffer_length = 0;
	}

	/* If we made it here, there were no errors during this call to submit */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/* Store the user's request flags and mark this ctx as currently being processed. */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) {
		/* Compute how many bytes to copy from user buffer into extra block */
		uint32_t copy_len = SHA1_BLOCK_SIZE - ctx->partial_block_buffer_length;

		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
			       buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/* The extra block should never contain more than 1 block here */
		assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);

		/* If the extra block buffer contains exactly 1 block, it can be hashed. */
		if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	return sha1_ctx_mgr_resubmit(mgr, ctx);
}
static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
{
	struct sha1_hash_ctx *ctx;

	while (1) {
		ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			return NULL;

		/*
		 * If flush returned a job, resubmit the job to finish processing.
		 */
		ctx = sha1_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
		 * Otherwise, all jobs currently being managed by the sha1_ctx_mgr
		 * still need processing. Loop.
		 */
		if (ctx)
			return ctx;
	}
}
static int sha1_mb_init(struct shash_desc *desc)
{
	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

	sctx->job.result_digest[0] = SHA1_H0;
	sctx->job.result_digest[1] = SHA1_H1;
	sctx->job.result_digest[2] = SHA1_H2;
	sctx->job.result_digest[3] = SHA1_H3;
	sctx->job.result_digest[4] = SHA1_H4;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}
static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int	i;
	struct	sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc);
	__be32	*dst = (__be32 *) rctx->out;

	for (i = 0; i < 5; ++i)
		dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

	return 0;
}
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			   struct mcryptd_alg_cstate *cstate, bool flush)
{
	int	flag = HASH_UPDATE;
	int	nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha1_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}
		sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(&rctx->desc);
		kernel_fpu_begin();
		sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
		}
		kernel_fpu_end();
		if (sha_ctx)
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha1_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}
static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;

	/* remove from work list */
	spin_lock(&cstate->work_lock);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				rctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				rctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	}

	return 0;
}
static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
			     struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock(&cstate->work_lock);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);

	mcryptd_arm_flusher(cstate, delay);
}
static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);

	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}
	rctx->out = out;

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	sha1_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
static int sha1_mb_final(struct shash_desc *desc, u8 *out)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct sha1_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	rctx->out = out;
	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	/* flag HASH_FINAL and 0 data size */
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
static int sha1_mb_export(struct shash_desc *desc, void *out)
{
	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}
static int sha1_mb_import(struct shash_desc *desc, const void *in)
{
	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}
static struct shash_alg sha1_mb_shash_alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_mb_init,
	.update		=	sha1_mb_update,
	.final		=	sha1_mb_final,
	.finup		=	sha1_mb_finup,
	.export		=	sha1_mb_export,
	.import		=	sha1_mb_import,
	.descsize	=	sizeof(struct sha1_hash_ctx),
	.statesize	=	sizeof(struct sha1_hash_ctx),
	.base		=	{
		.cra_name	 = "__sha1-mb",
		.cra_driver_name = "__intel_sha1-mb",
		/*
		 * use ASYNC flag as some buffers in multi-buffer
		 * algo may not have completed before hashing thread sleep
		 */
		.cra_flags	 = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC |
				   CRYPTO_ALG_INTERNAL,
		.cra_blocksize	 = SHA1_BLOCK_SIZE,
		.cra_module	 = THIS_MODULE,
		.cra_list	 = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
	}
};
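
/*
 * Note the naming convention: "__sha1-mb"/"__intel_sha1-mb" above is the
 * internal, FPU-using shash that only mcryptd is meant to instantiate;
 * the "sha1_mb" ahash registered below is the algorithm exposed to the
 * rest of the kernel, and each of its handlers simply forwards the
 * request to the mcryptd-wrapped inner transform.
 */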
static int sha1_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}
static int sha1_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}
static int sha1_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}
static int sha1_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}
static int sha1_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}
static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
					  CRYPTO_ALG_INTERNAL,
					  CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha1_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}
static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}
static struct ahash_alg sha1_mb_async_alg = {
	.init           = sha1_mb_async_init,
	.update         = sha1_mb_async_update,
	.final          = sha1_mb_async_final,
	.finup          = sha1_mb_async_finup,
	.digest         = sha1_mb_async_digest,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base = {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1_mb",
			.cra_flags		= CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_type		= &crypto_ahash_type,
			.cra_module		= THIS_MODULE,
			.cra_list		= LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
			.cra_init		= sha1_mb_async_init_tfm,
			.cra_exit		= sha1_mb_async_exit_tfm,
			.cra_ctxsize		= sizeof(struct sha1_mb_ctx),
		},
	},
};
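
/*
 * Minimal usage sketch (not part of this driver): a kernel caller reaches
 * this code through the generic ahash API, roughly as below.  Error
 * handling is elided; "my_done" and the one-element scatterlist are
 * illustrative assumptions only.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 digest[SHA1_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, data_len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, NULL);
 *	ahash_request_set_crypt(req, &sg, digest, data_len);
 *	crypto_ahash_digest(req);	// may return -EINPROGRESS
 *
 * Because the multi-buffer algorithm batches jobs and relies on the lazy
 * flusher below, callers must be prepared for -EINPROGRESS and wait for
 * the completion callback.
 */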
static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha1_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				  struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha1_hash_ctx *) sha1_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				  struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}
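
/*
 * The flusher returns the expiry time of the oldest request still queued
 * (or 0 if the list drained), which mcryptd uses to re-arm the per-cpu
 * flush work; this is what bounds the completion latency of partially
 * filled multi-buffer lanes to roughly FLUSH_INTERVAL.
 */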
static int __init sha1_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);

	sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
	sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
	sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
	sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;

	if (!sha1_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha1_mb_alg_state;
		cpu_state->mgr = (struct sha1_ctx_mgr *) kzalloc(sizeof(struct sha1_ctx_mgr), GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha1_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha1_mb_alg_state.flusher = &sha1_mb_flusher;

	err = crypto_register_shash(&sha1_mb_shash_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha1_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_shash(&sha1_mb_shash_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
	return -ENODEV;
}
static void __exit sha1_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha1_mb_async_alg);
	crypto_unregister_shash(&sha1_mb_shash_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
}
module_init(sha1_mb_mod_init);
module_exit(sha1_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS_CRYPTO("sha1");