/*
 * Glue code for the SHA256 Secure Hash Algorithm assembler
 * implementation using supplemental SSE3 / AVX / AVX2 instructions.
 *
 * This file is based on sha256_generic.c
 *
 * Copyright (C) 2013 Intel Corporation.
 *
 * Author:
 *	Tim Chen <tim.c.chen@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <linux/string.h>

asmlinkage void sha256_transform_ssse3(const char *data, u32 *digest,
				       u64 rounds);
#ifdef CONFIG_AS_AVX
asmlinkage void sha256_transform_avx(const char *data, u32 *digest,
				     u64 rounds);
#endif
#ifdef CONFIG_AS_AVX2
asmlinkage void sha256_transform_rorx(const char *data, u32 *digest,
				      u64 rounds);
#endif

static asmlinkage void (*sha256_transform_asm)(const char *, u32 *, u64);
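
/*
 * sha256_transform_asm is set once in sha256_ssse3_mod_init() to the
 * fastest transform the CPU (and assembler) supports, and is then used
 * for every block processed by this module.
 */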

static int sha256_ssse3_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	sctx->state[0] = SHA256_H0;
	sctx->state[1] = SHA256_H1;
	sctx->state[2] = SHA256_H2;
	sctx->state[3] = SHA256_H3;
	sctx->state[4] = SHA256_H4;
	sctx->state[5] = SHA256_H5;
	sctx->state[6] = SHA256_H6;
	sctx->state[7] = SHA256_H7;
	sctx->count = 0;

	return 0;
}
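
/*
 * Core update path: assumes the caller has already taken the FPU with
 * kernel_fpu_begin() and that partial + len covers at least one block.
 */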
static int __sha256_ssse3_update(struct shash_desc *desc, const u8 *data,
				 unsigned int len, unsigned int partial)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int done = 0;

	sctx->count += len;

	if (partial) {
		/* Fill the partially used buffer, then hash it. */
		done = SHA256_BLOCK_SIZE - partial;
		memcpy(sctx->buf + partial, data, done);
		sha256_transform_asm(sctx->buf, sctx->state, 1);
	}

	if (len - done >= SHA256_BLOCK_SIZE) {
		const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;

		sha256_transform_asm(data + done, sctx->state, (u64) rounds);

		done += rounds * SHA256_BLOCK_SIZE;
	}

	/* Buffer whatever is left over for the next update or final. */
	memcpy(sctx->buf, data + done, len - done);

	return 0;
}
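
/*
 * Buffer sub-block updates, and fall back to the generic C
 * implementation when the FPU cannot be used in the current context
 * (e.g. from an interrupt while user FPU state is live).
 */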
static int sha256_ssse3_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
	int res;

	/* Handle the fast case right here */
	if (partial + len < SHA256_BLOCK_SIZE) {
		sctx->count += len;
		memcpy(sctx->buf + partial, data, len);

		return 0;
	}

	if (!irq_fpu_usable()) {
		res = crypto_sha256_update(desc, data, len);
	} else {
		kernel_fpu_begin();
		res = __sha256_ssse3_update(desc, data, len, partial);
		kernel_fpu_end();
	}

	return res;
}

/* Add padding and return the message digest. */
static int sha256_ssse3_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };

	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA256_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56)-index);

	if (!irq_fpu_usable()) {
		crypto_sha256_update(desc, padding, padlen);
		crypto_sha256_update(desc, (const u8 *)&bits, sizeof(bits));
	} else {
		kernel_fpu_begin();
		/* We need to fill a whole block for __sha256_ssse3_update() */
		if (padlen <= 56) {
			sctx->count += padlen;
			memcpy(sctx->buf + index, padding, padlen);
		} else {
			__sha256_ssse3_update(desc, padding, padlen, index);
		}
		__sha256_ssse3_update(desc, (const u8 *)&bits,
				      sizeof(bits), 56);
		kernel_fpu_end();
	}

	/* Store state in digest */
	for (i = 0; i < 8; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}
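
/*
 * export/import copy the raw hash state so a request can be suspended
 * and later resumed, possibly by a different implementation of the
 * same algorithm.
 */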
static int sha256_ssse3_export(struct shash_desc *desc, void *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha256_ssse3_import(struct shash_desc *desc, const void *in)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

static int sha224_ssse3_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	sctx->state[0] = SHA224_H0;
	sctx->state[1] = SHA224_H1;
	sctx->state[2] = SHA224_H2;
	sctx->state[3] = SHA224_H3;
	sctx->state[4] = SHA224_H4;
	sctx->state[5] = SHA224_H5;
	sctx->state[6] = SHA224_H6;
	sctx->state[7] = SHA224_H7;
	sctx->count = 0;

	return 0;
}

static int sha224_ssse3_final(struct shash_desc *desc, u8 *hash)
{
	u8 D[SHA256_DIGEST_SIZE];

	/* Compute the full SHA-256 digest, then truncate to 224 bits. */
	sha256_ssse3_final(desc, D);

	memcpy(hash, D, SHA224_DIGEST_SIZE);
	memset(D, 0, SHA256_DIGEST_SIZE);

	return 0;
}
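
/*
 * SHA-224 shares the SHA-256 block transform; the two entries differ
 * only in initial values, digest size and final truncation.
 */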
static struct shash_alg algs[] = { {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	sha256_ssse3_init,
	.update		=	sha256_ssse3_update,
	.final		=	sha256_ssse3_final,
	.export		=	sha256_ssse3_export,
	.import		=	sha256_ssse3_import,
	.descsize	=	sizeof(struct sha256_state),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name	=	"sha256",
		.cra_driver_name =	"sha256-ssse3",
		.cra_priority	=	150,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA256_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
}, {
	.digestsize	=	SHA224_DIGEST_SIZE,
	.init		=	sha224_ssse3_init,
	.update		=	sha256_ssse3_update,
	.final		=	sha224_ssse3_final,
	.export		=	sha256_ssse3_export,
	.import		=	sha256_ssse3_import,
	.descsize	=	sizeof(struct sha256_state),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name	=	"sha224",
		.cra_driver_name =	"sha224-ssse3",
		.cra_priority	=	150,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA224_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
} };
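
/*
 * AVX needs more than the CPUID feature bit: the OS must have enabled
 * XSAVE (OSXSAVE) and granted SSE and YMM state in XCR0, or executing
 * AVX instructions will fault.
 */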
#ifdef CONFIG_AS_AVX
static bool __init avx_usable(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave)
		return false;

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		pr_info("AVX detected but unusable.\n");
		return false;
	}

	return true;
}
#endif
static int __init sha256_ssse3_mod_init(void)
{
	/* test for SSSE3 first */
	if (cpu_has_ssse3)
		sha256_transform_asm = sha256_transform_ssse3;

#ifdef CONFIG_AS_AVX
	/* allow AVX to override SSSE3, it's a little faster */
	if (avx_usable()) {
#ifdef CONFIG_AS_AVX2
		if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2))
			sha256_transform_asm = sha256_transform_rorx;
		else
#endif
			sha256_transform_asm = sha256_transform_avx;
	}
#endif

	if (sha256_transform_asm) {
#ifdef CONFIG_AS_AVX
		if (sha256_transform_asm == sha256_transform_avx)
			pr_info("Using AVX optimized SHA-256 implementation\n");
#ifdef CONFIG_AS_AVX2
		else if (sha256_transform_asm == sha256_transform_rorx)
			pr_info("Using AVX2 optimized SHA-256 implementation\n");
#endif
		else
#endif
			pr_info("Using SSSE3 optimized SHA-256 implementation\n");
		return crypto_register_shashes(algs, ARRAY_SIZE(algs));
	}
	pr_info("Neither AVX nor SSSE3 is available/usable.\n");

	return -ENODEV;
}

static void __exit sha256_ssse3_mod_fini(void)
{
	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

module_init(sha256_ssse3_mod_init);
module_exit(sha256_ssse3_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");

MODULE_ALIAS("sha256");
MODULE_ALIAS("sha224");