arch/x86/crypto/sha256-mb/sha256_mb.c
/*
 * Multi buffer SHA256 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *  Copyright(c) 2016 Intel Corporation.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  Contact Information:
 *	Megha Dey <megha.dey@linux.intel.com>
 *
 *  BSD LICENSE
 *
 *  Copyright(c) 2016 Intel Corporation.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *    * Neither the name of Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>

#include "sha256_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */

static struct mcryptd_alg_state sha256_mb_alg_state;

struct sha256_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};
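/*
 * Glue-code layout (summary):
 *
 *  - "sha256_mb" (sha256_mb_async_alg) is the user-visible asynchronous
 *    algorithm.  Its tfm context, struct sha256_mb_ctx above, only holds the
 *    mcryptd-wrapped handle of the internal algorithm; the sha256_mb_async_*
 *    helpers below forward every request to that child transform.
 *  - "__sha256-mb" (sha256_mb_areq_alg) is the internal algorithm that runs
 *    from the mcryptd work queue.  Its request context is a struct
 *    sha256_hash_ctx, which is fed to the per-cpu sha256_ctx_mgr below.
 */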
static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx)
{
	struct ahash_request *areq;

	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}

static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct ahash_request *areq)
{
	rctx->flag = HASH_UPDATE;
}

static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state);
static asmlinkage struct job_sha256* (*sha256_job_mgr_submit)
			(struct sha256_mb_mgr *state, struct job_sha256 *job);
static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
			(struct sha256_mb_mgr *state);
static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
			(struct sha256_mb_mgr *state);

inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
			   uint64_t total_len)
{
	uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA256_BLOCK_SIZE);
	padblock[i] = 0x80;

	i += ((SHA256_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA256_PADLENGTHFIELD_SIZE;

#if SHA256_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA256_LOG2_BLOCK_SIZE;
}
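/*
 * Worked example of the padding arithmetic above, assuming the usual 8-byte
 * SHA-256 length field (SHA256_PADLENGTHFIELD_SIZE == 8, defined in
 * sha256_mb_ctx.h):
 *
 *   total_len = 100 bytes
 *     i = 100 & 63 = 36            residue already sitting in padblock
 *     padblock[36] = 0x80          mandatory terminator byte
 *     i += (63 & -(100 + 9)) + 1 + 8 = 19 + 9 = 28   ->   i = 64
 *     the bit length (100 << 3) is stored big-endian at padblock[56..63]
 *     return 64 >> 6 = 1 extra block to hash
 *
 *   total_len = 120 bytes (residue of 56 leaves no room for the length
 *   field in the same block):
 *     i = 56, padblock[56] = 0x80
 *     i += (63 & -129) + 9 = 63 + 9 = 72   ->   i = 128
 *     return 2 extra blocks
 */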
static struct sha256_hash_ctx
	*sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr,
				 struct sha256_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA256_BLOCK_SIZE - 1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA256_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA256_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha256_hash_ctx *)
					sha256_job_mgr_submit(&mgr->mgr,
							      &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {

			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks =
				sha256_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha256_hash_ctx *)
				sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}

static struct sha256_hash_ctx
	*sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return to
	 * the user. If it is not ready, resubmit the job to finish processing.
	 * If sha256_ctx_mgr_resubmit returned a job, it is ready to be
	 * returned. Otherwise, all jobs currently being managed by the
	 * hash_ctx_mgr still need processing.
	 */
	struct sha256_hash_ctx *ctx;

	ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr);
	return sha256_ctx_mgr_resubmit(mgr, ctx);
}

static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr)
{
	sha256_job_mgr_init(&mgr->mgr);
}
static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
					  struct sha256_hash_ctx *ctx,
					  const void *buffer,
					  uint32_t len,
					  int flags)
{
	if (flags & ~(HASH_UPDATE | HASH_LAST)) {
		/* User should not pass anything other than UPDATE or LAST */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_COMPLETE) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	/* If we made it here, there was no error during this call to submit */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/*
	 * Store the user's request flags and mark this ctx as currently
	 * being processed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) {
		/*
		 * Compute how many bytes to copy from user buffer into
		 * extra block
		 */
		uint32_t copy_len = SHA256_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(
		&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
				buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/* The extra block should never contain more than 1 block */
		assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);

		/*
		 * If the extra block buffer contains exactly 1 block,
		 * it can be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha256_hash_ctx *)
				sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	return sha256_ctx_mgr_resubmit(mgr, ctx);
}
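/*
 * sha256_ctx_mgr_flush() forces the underlying job manager to finish the
 * lanes currently in flight even though not all SIMD lanes are filled, and
 * returns the first context that becomes safe to hand back to its owner
 * (or NULL once nothing is left).  It is used on the per-cpu flusher path
 * (sha256_mb_flusher / sha_finish_walk with flush == true) to drain jobs
 * whose flush timer has expired.
 */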
static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr)
{
	struct sha256_hash_ctx *ctx;

	while (1) {
		ctx = (struct sha256_hash_ctx *)
					sha256_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			return NULL;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha256_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha256_ctx_mgr_resubmit returned a job, it is ready to
		 * be returned. Otherwise, all jobs currently being managed by
		 * the sha256_ctx_mgr still need processing. Loop.
		 */
		if (ctx)
			return ctx;
	}
}

static int sha256_mb_init(struct ahash_request *areq)
{
	struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);

	hash_ctx_init(sctx);
	sctx->job.result_digest[0] = SHA256_H0;
	sctx->job.result_digest[1] = SHA256_H1;
	sctx->job.result_digest[2] = SHA256_H2;
	sctx->job.result_digest[3] = SHA256_H3;
	sctx->job.result_digest[4] = SHA256_H4;
	sctx->job.result_digest[5] = SHA256_H5;
	sctx->job.result_digest[6] = SHA256_H6;
	sctx->job.result_digest[7] = SHA256_H7;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}

static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int i;
	struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
	__be32 *dst = (__be32 *) rctx->out;

	for (i = 0; i < 8; ++i)
		dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

	return 0;
}

static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha256_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}

		sha_ctx = (struct sha256_hash_ctx *)
						ahash_request_ctx(&rctx->areq);
		kernel_fpu_begin();
		sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx,
						rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha256_ctx_mgr_flush(cstate->mgr);
		}
		kernel_fpu_end();
		if (sha_ctx)
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha256_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}

static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha256_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;

	/* remove from work list */
	spin_lock(&cstate->work_lock);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				req_ctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
	}

	return 0;
}

static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
			     struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

	/* initialize tag */
	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock(&cstate->work_lock);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);

	mcryptd_arm_flusher(cstate, delay);
}
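/*
 * Request aging: every request queued on a cpu's work_list above is tagged
 * with its arrival time and an expiry of arrival + FLUSH_INTERVAL (1000 us,
 * converted to jiffies).  If the lanes do not fill up quickly enough for a
 * job to complete through the normal submit path, the flusher armed here
 * eventually runs sha256_mb_flusher() and forces completion of any request
 * whose expiry has passed, which bounds the extra latency introduced by
 * multi-buffer batching.
 */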
static int sha256_mb_update(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha256_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha256_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
	sha256_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
							nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);

	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha256_mb_finup(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha256_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha256_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
	sha256_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
								nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha256_mb_final(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
			areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha256_mb_alg_state.alg_cstate);

	struct sha256_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
	/* flag HASH_FINAL and 0 data size */
	sha256_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
								HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha256_mb_export(struct ahash_request *areq, void *out)
{
	struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha256_mb_import(struct ahash_request *areq, const void *in)
{
	struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}
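/*
 * export/import snapshot the whole struct sha256_hash_ctx, which is why both
 * algorithms below advertise statesize = sizeof(struct sha256_hash_ctx): the
 * saved state has to carry the running digest, the partial block buffer and
 * the byte counter so that a hash can be resumed on another tfm.
 */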
static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb",
						CRYPTO_ALG_INTERNAL,
						CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha256_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				sizeof(struct ahash_request) +
				crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}

static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				sizeof(struct ahash_request) +
				sizeof(struct sha256_hash_ctx));

	return 0;
}

static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static struct ahash_alg sha256_mb_areq_alg = {
	.init		= sha256_mb_init,
	.update		= sha256_mb_update,
	.final		= sha256_mb_final,
	.finup		= sha256_mb_finup,
	.export		= sha256_mb_export,
	.import		= sha256_mb_import,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct sha256_hash_ctx),
		.base = {
			.cra_name	 = "__sha256-mb",
			.cra_driver_name = "__intel_sha256-mb",
			.cra_priority	 = 100,
			/*
			 * use ASYNC flag as some buffers in multi-buffer
			 * algo may not have completed before hashing thread
			 * sleep
			 */
			.cra_flags	= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_INTERNAL,
			.cra_blocksize	= SHA256_BLOCK_SIZE,
			.cra_module	= THIS_MODULE,
			.cra_list	= LIST_HEAD_INIT
				(sha256_mb_areq_alg.halg.base.cra_list),
			.cra_init	= sha256_mb_areq_init_tfm,
			.cra_exit	= sha256_mb_areq_exit_tfm,
			.cra_ctxsize	= sizeof(struct sha256_hash_ctx),
		},
	},
};

static int sha256_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}

static int sha256_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}

static int sha256_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}

static int sha256_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}

static int sha256_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}

static int sha256_mb_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_export(mcryptd_req, out);
}

static int sha256_mb_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
	struct mcryptd_hash_request_ctx *rctx;
	struct ahash_request *areq;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	rctx = ahash_request_ctx(mcryptd_req);
	areq = &rctx->areq;

	ahash_request_set_tfm(areq, child);
	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
					rctx->complete, req);

	return crypto_ahash_import(mcryptd_req, in);
}

static struct ahash_alg sha256_mb_async_alg = {
	.init		= sha256_mb_async_init,
	.update		= sha256_mb_async_update,
	.final		= sha256_mb_async_final,
	.finup		= sha256_mb_async_finup,
	.export		= sha256_mb_async_export,
	.import		= sha256_mb_async_import,
	.digest		= sha256_mb_async_digest,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct sha256_hash_ctx),
		.base = {
			.cra_name	 = "sha256",
			.cra_driver_name = "sha256_mb",
			/*
			 * Low priority, since with few concurrent hash requests
			 * this is extremely slow due to the flush delay. Users
			 * whose workloads would benefit from this can request
			 * it explicitly by driver name, or can increase its
			 * priority at runtime using NETLINK_CRYPTO.
			 */
			.cra_priority	= 50,
			.cra_flags	= CRYPTO_ALG_ASYNC,
			.cra_blocksize	= SHA256_BLOCK_SIZE,
			.cra_module	= THIS_MODULE,
			.cra_list	= LIST_HEAD_INIT
				(sha256_mb_async_alg.halg.base.cra_list),
			.cra_init	= sha256_mb_async_init_tfm,
			.cra_exit	= sha256_mb_async_exit_tfm,
			.cra_ctxsize	= sizeof(struct sha256_mb_ctx),
			.cra_alignmask	= 0,
		},
	},
};
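/*
 * Per-cpu flusher, invoked by mcryptd after the delay armed in
 * sha256_mb_add_list().  It walks the cpu's work_list in arrival order and,
 * for every request whose expire time has passed, forces the ctx manager to
 * flush so the request can complete even though the lanes never filled up.
 * The expire time of the oldest remaining request is returned and used to
 * re-arm the flusher.
 */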
static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha256_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha256_hash_ctx *)
					sha256_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha256_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}
static int __init sha256_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha256_mb_alg_state.alg_cstate = alloc_percpu
						(struct mcryptd_alg_cstate);

	sha256_job_mgr_init = sha256_mb_mgr_init_avx2;
	sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2;
	sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2;
	sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2;

	if (!sha256_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha256_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr),
					GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha256_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha256_mb_alg_state.flusher = &sha256_mb_flusher;

	err = crypto_register_ahash(&sha256_mb_areq_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha256_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_ahash(&sha256_mb_areq_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha256_mb_alg_state.alg_cstate);
	return -ENODEV;
}
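/*
 * Minimal usage sketch (illustrative, not part of this driver): because
 * "sha256_mb" registers at low priority, a kernel user who wants the
 * multi-buffer implementation specifically would select it by driver name
 * and treat -EINPROGRESS as the normal asynchronous completion path, e.g.:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256_mb", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, sgl, digest, data_len);
 *	err = crypto_ahash_digest(req);  // -EINPROGRESS until my_done_cb runs
 *
 * my_done_cb, my_ctx, sgl, digest and data_len are placeholders for the
 * caller's own completion callback and buffers.
 */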
static void __exit sha256_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha256_mb_async_alg);
	crypto_unregister_ahash(&sha256_mb_areq_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha256_mb_alg_state.alg_cstate);
}

module_init(sha256_mb_mod_init);
module_exit(sha256_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS_CRYPTO("sha256");