/*
 * Glue code for the SHA512 Secure Hash Algorithm assembler
 * implementation using supplemental SSE3 / AVX / AVX2 instructions.
 *
 * This file is based on sha512_generic.c
 *
 * Copyright (C) 2013 Intel Corporation
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 #include <crypto/internal/hash.h>
31 #include <linux/init.h>
32 #include <linux/module.h>
34 #include <linux/cryptohash.h>
35 #include <linux/types.h>
36 #include <crypto/sha.h>
37 #include <asm/byteorder.h>
40 #include <asm/xsave.h>
42 #include <linux/string.h>
44 asmlinkage
void sha512_transform_ssse3(const char *data
, u64
*digest
,
47 asmlinkage
void sha512_transform_avx(const char *data
, u64
*digest
,
51 asmlinkage
void sha512_transform_rorx(const char *data
, u64
*digest
,
55 static asmlinkage
void (*sha512_transform_asm
)(const char *, u64
*, u64
);
58 static int sha512_ssse3_init(struct shash_desc
*desc
)
60 struct sha512_state
*sctx
= shash_desc_ctx(desc
);
62 sctx
->state
[0] = SHA512_H0
;
63 sctx
->state
[1] = SHA512_H1
;
64 sctx
->state
[2] = SHA512_H2
;
65 sctx
->state
[3] = SHA512_H3
;
66 sctx
->state
[4] = SHA512_H4
;
67 sctx
->state
[5] = SHA512_H5
;
68 sctx
->state
[6] = SHA512_H6
;
69 sctx
->state
[7] = SHA512_H7
;
70 sctx
->count
[0] = sctx
->count
[1] = 0;
75 static int __sha512_ssse3_update(struct shash_desc
*desc
, const u8
*data
,
76 unsigned int len
, unsigned int partial
)
78 struct sha512_state
*sctx
= shash_desc_ctx(desc
);
79 unsigned int done
= 0;
81 sctx
->count
[0] += len
;
82 if (sctx
->count
[0] < len
)
86 done
= SHA512_BLOCK_SIZE
- partial
;
87 memcpy(sctx
->buf
+ partial
, data
, done
);
88 sha512_transform_asm(sctx
->buf
, sctx
->state
, 1);
91 if (len
- done
>= SHA512_BLOCK_SIZE
) {
92 const unsigned int rounds
= (len
- done
) / SHA512_BLOCK_SIZE
;
94 sha512_transform_asm(data
+ done
, sctx
->state
, (u64
) rounds
);
96 done
+= rounds
* SHA512_BLOCK_SIZE
;
99 memcpy(sctx
->buf
, data
+ done
, len
- done
);
104 static int sha512_ssse3_update(struct shash_desc
*desc
, const u8
*data
,
107 struct sha512_state
*sctx
= shash_desc_ctx(desc
);
108 unsigned int partial
= sctx
->count
[0] % SHA512_BLOCK_SIZE
;
111 /* Handle the fast case right here */
112 if (partial
+ len
< SHA512_BLOCK_SIZE
) {
113 sctx
->count
[0] += len
;
114 if (sctx
->count
[0] < len
)
116 memcpy(sctx
->buf
+ partial
, data
, len
);
121 if (!irq_fpu_usable()) {
122 res
= crypto_sha512_update(desc
, data
, len
);
125 res
= __sha512_ssse3_update(desc
, data
, len
, partial
);
133 /* Add padding and return the message digest. */
134 static int sha512_ssse3_final(struct shash_desc
*desc
, u8
*out
)
136 struct sha512_state
*sctx
= shash_desc_ctx(desc
);
137 unsigned int i
, index
, padlen
;
138 __be64
*dst
= (__be64
*)out
;
140 static const u8 padding
[SHA512_BLOCK_SIZE
] = { 0x80, };
142 /* save number of bits */
143 bits
[1] = cpu_to_be64(sctx
->count
[0] << 3);
144 bits
[0] = cpu_to_be64(sctx
->count
[1] << 3) | sctx
->count
[0] >> 61;
146 /* Pad out to 112 mod 128 and append length */
147 index
= sctx
->count
[0] & 0x7f;
148 padlen
= (index
< 112) ? (112 - index
) : ((128+112) - index
);
150 if (!irq_fpu_usable()) {
151 crypto_sha512_update(desc
, padding
, padlen
);
152 crypto_sha512_update(desc
, (const u8
*)&bits
, sizeof(bits
));
155 /* We need to fill a whole block for __sha512_ssse3_update() */
157 sctx
->count
[0] += padlen
;
158 if (sctx
->count
[0] < padlen
)
160 memcpy(sctx
->buf
+ index
, padding
, padlen
);
162 __sha512_ssse3_update(desc
, padding
, padlen
, index
);
164 __sha512_ssse3_update(desc
, (const u8
*)&bits
,
169 /* Store state in digest */
170 for (i
= 0; i
< 8; i
++)
171 dst
[i
] = cpu_to_be64(sctx
->state
[i
]);
174 memset(sctx
, 0, sizeof(*sctx
));
179 static int sha512_ssse3_export(struct shash_desc
*desc
, void *out
)
181 struct sha512_state
*sctx
= shash_desc_ctx(desc
);
183 memcpy(out
, sctx
, sizeof(*sctx
));
188 static int sha512_ssse3_import(struct shash_desc
*desc
, const void *in
)
190 struct sha512_state
*sctx
= shash_desc_ctx(desc
);
192 memcpy(sctx
, in
, sizeof(*sctx
));
197 static struct shash_alg alg
= {
198 .digestsize
= SHA512_DIGEST_SIZE
,
199 .init
= sha512_ssse3_init
,
200 .update
= sha512_ssse3_update
,
201 .final
= sha512_ssse3_final
,
202 .export
= sha512_ssse3_export
,
203 .import
= sha512_ssse3_import
,
204 .descsize
= sizeof(struct sha512_state
),
205 .statesize
= sizeof(struct sha512_state
),
207 .cra_name
= "sha512",
208 .cra_driver_name
= "sha512-ssse3",
210 .cra_flags
= CRYPTO_ALG_TYPE_SHASH
,
211 .cra_blocksize
= SHA512_BLOCK_SIZE
,
212 .cra_module
= THIS_MODULE
,
#ifdef CONFIG_AS_AVX
/*
 * True when AVX may be used: the CPU advertises AVX and OSXSAVE, and
 * the OS has enabled both SSE and YMM state saving in XCR0 (without
 * OS support the YMM registers are not context-switched).  Restored:
 * the 'u64 xcr0;' declaration, both 'return false;' paths, the final
 * 'return true;', and the CONFIG_AS_AVX guard implied by the matching
 * guards visible in sha512_ssse3_mod_init().
 */
static bool __init avx_usable(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave)
		return false;

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		pr_info("AVX detected but unusable.\n");
		return false;
	}

	return true;
}
#endif
235 static int __init
sha512_ssse3_mod_init(void)
237 /* test for SSE3 first */
239 sha512_transform_asm
= sha512_transform_ssse3
;
242 /* allow AVX to override SSSE3, it's a little faster */
244 #ifdef CONFIG_AS_AVX2
245 if (boot_cpu_has(X86_FEATURE_AVX2
))
246 sha512_transform_asm
= sha512_transform_rorx
;
249 sha512_transform_asm
= sha512_transform_avx
;
253 if (sha512_transform_asm
) {
255 if (sha512_transform_asm
== sha512_transform_avx
)
256 pr_info("Using AVX optimized SHA-512 implementation\n");
257 #ifdef CONFIG_AS_AVX2
258 else if (sha512_transform_asm
== sha512_transform_rorx
)
259 pr_info("Using AVX2 optimized SHA-512 implementation\n");
263 pr_info("Using SSSE3 optimized SHA-512 implementation\n");
264 return crypto_register_shash(&alg
);
266 pr_info("Neither AVX nor SSSE3 is available/usable.\n");
271 static void __exit
sha512_ssse3_mod_fini(void)
273 crypto_unregister_shash(&alg
);
276 module_init(sha512_ssse3_mod_init
);
277 module_exit(sha512_ssse3_mod_fini
);
279 MODULE_LICENSE("GPL");
280 MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
282 MODULE_ALIAS("sha512");