arch/arm64/crypto/sha256-glue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64
 *
 * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>

MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha256");

asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
                                        unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);

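/*
 * sha256_base_do_update() and sha256_base_do_finalize() expect a
 * sha256_block_fn; these thin wrappers adapt the OpenSSL-derived
 * assembly routines to that prototype.
 */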
static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
                                      int blocks)
{
        sha256_block_data_order(sst->state, src, blocks);
}

asmlinkage void sha256_block_neon(u32 *digest, const void *data,
                                  unsigned int num_blks);

static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
                                int blocks)
{
        sha256_block_neon(sst->state, src, blocks);
}

static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
                                      unsigned int len)
{
        return sha256_base_do_update(desc, data, len,
                                     __sha256_block_data_order);
}

static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
                                     unsigned int len, u8 *out)
{
        if (len)
                sha256_base_do_update(desc, data, len,
                                      __sha256_block_data_order);
        sha256_base_do_finalize(desc, __sha256_block_data_order);

        return sha256_base_finish(desc, out);
}

static int crypto_sha256_arm64_final(struct shash_desc *desc, u8 *out)
{
        return crypto_sha256_arm64_finup(desc, NULL, 0, out);
}

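/*
 * Priority 125 ranks these scalar implementations above the generic C
 * sha256 (priority 100) while keeping them below the NEON variants
 * registered at priority 150 further down; the crypto core picks the
 * highest-priority implementation for a given cra_name.
 */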
static struct shash_alg algs[] = { {
        .digestsize = SHA256_DIGEST_SIZE,
        .init = sha256_base_init,
        .update = crypto_sha256_arm64_update,
        .final = crypto_sha256_arm64_final,
        .finup = crypto_sha256_arm64_finup,
        .descsize = sizeof(struct sha256_state),
        .base.cra_name = "sha256",
        .base.cra_driver_name = "sha256-arm64",
        .base.cra_priority = 125,
        .base.cra_blocksize = SHA256_BLOCK_SIZE,
        .base.cra_module = THIS_MODULE,
}, {
        .digestsize = SHA224_DIGEST_SIZE,
        .init = sha224_base_init,
        .update = crypto_sha256_arm64_update,
        .final = crypto_sha256_arm64_final,
        .finup = crypto_sha256_arm64_finup,
        .descsize = sizeof(struct sha256_state),
        .base.cra_name = "sha224",
        .base.cra_driver_name = "sha224-arm64",
        .base.cra_priority = 125,
        .base.cra_blocksize = SHA224_BLOCK_SIZE,
        .base.cra_module = THIS_MODULE,
} };

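/*
 * kernel_neon_begin() disables preemption while the FP/SIMD registers
 * are in use, so on preemptible kernels the update path below limits
 * each begin/end section to at most one block's worth of input to keep
 * scheduling latency bounded.
 */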
static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
                              unsigned int len)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);

        if (!crypto_simd_usable())
                return sha256_base_do_update(desc, data, len,
                                __sha256_block_data_order);

        while (len > 0) {
                unsigned int chunk = len;

                /*
                 * Don't hog the CPU for the entire time it takes to process all
                 * input when running on a preemptible kernel, but process the
                 * data block by block instead.
                 */
                if (IS_ENABLED(CONFIG_PREEMPTION) &&
                    chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
                        chunk = SHA256_BLOCK_SIZE -
                                sctx->count % SHA256_BLOCK_SIZE;

                kernel_neon_begin();
                sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
                kernel_neon_end();
                data += chunk;
                len -= chunk;
        }
        return 0;
}

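/*
 * finup either runs entirely on the scalar fallback when SIMD is not
 * usable, or pushes the bulk of the data through sha256_update_neon()
 * and finalizes inside its own kernel_neon_begin()/end() section.
 */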
static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
                             unsigned int len, u8 *out)
{
        if (!crypto_simd_usable()) {
                if (len)
                        sha256_base_do_update(desc, data, len,
                                __sha256_block_data_order);
                sha256_base_do_finalize(desc, __sha256_block_data_order);
        } else {
                if (len)
                        sha256_update_neon(desc, data, len);
                kernel_neon_begin();
                sha256_base_do_finalize(desc, __sha256_block_neon);
                kernel_neon_end();
        }
        return sha256_base_finish(desc, out);
}

static int sha256_final_neon(struct shash_desc *desc, u8 *out)
{
        return sha256_finup_neon(desc, NULL, 0, out);
}

static struct shash_alg neon_algs[] = { {
        .digestsize = SHA256_DIGEST_SIZE,
        .init = sha256_base_init,
        .update = sha256_update_neon,
        .final = sha256_final_neon,
        .finup = sha256_finup_neon,
        .descsize = sizeof(struct sha256_state),
        .base.cra_name = "sha256",
        .base.cra_driver_name = "sha256-arm64-neon",
        .base.cra_priority = 150,
        .base.cra_blocksize = SHA256_BLOCK_SIZE,
        .base.cra_module = THIS_MODULE,
}, {
        .digestsize = SHA224_DIGEST_SIZE,
        .init = sha224_base_init,
        .update = sha256_update_neon,
        .final = sha256_final_neon,
        .finup = sha256_finup_neon,
        .descsize = sizeof(struct sha256_state),
        .base.cra_name = "sha224",
        .base.cra_driver_name = "sha224-arm64-neon",
        .base.cra_priority = 150,
        .base.cra_blocksize = SHA224_BLOCK_SIZE,
        .base.cra_module = THIS_MODULE,
} };

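/*
 * The scalar algorithms are registered unconditionally; the NEON
 * variants are added only when ASIMD is present, and the scalar set is
 * unwound again if registering the NEON set fails.
 */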
static int __init sha256_mod_init(void)
{
        int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
        if (ret)
                return ret;

        if (cpu_have_named_feature(ASIMD)) {
                ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
                if (ret)
                        crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
        }

        return ret;
}

static void __exit sha256_mod_fini(void)
{
        if (cpu_have_named_feature(ASIMD))
                crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
        crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

module_init(sha256_mod_init);
module_exit(sha256_mod_fini);

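/*
 * For reference only: a minimal sketch, not part of the original driver,
 * of how a kernel consumer might exercise these algorithms through the
 * synchronous hash (shash) API. The helper name is illustrative. The
 * shash declarations come from <crypto/hash.h>, already pulled in here
 * via <crypto/internal/hash.h>.
 */
static int __maybe_unused example_sha256_digest(const u8 *data,
                                                unsigned int len,
                                                u8 out[SHA256_DIGEST_SIZE])
{
        struct crypto_shash *tfm;
        int ret;

        /* "sha256" resolves to the highest-priority registered variant. */
        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                ret = crypto_shash_digest(desc, data, len, out);
                shash_desc_zero(desc);
        }

        crypto_free_shash(tfm);
        return ret;
}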