arch/x86/crypto/chacha_glue.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * x64 SIMD accelerated ChaCha and XChaCha stream ciphers,
 * including ChaCha20 (RFC7539)
 *
 * Copyright (C) 2015 Martin Willi
 */

#include <crypto/algapi.h>
#include <crypto/internal/chacha.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/simd.h>

#define CHACHA_STATE_ALIGN 16

asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
					unsigned int len, int nrounds);
asmlinkage void hchacha_block_ssse3(const u32 *state, u32 *out, int nrounds);

asmlinkage void chacha_2block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
asmlinkage void chacha_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);

asmlinkage void chacha_2block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
					   unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
					   unsigned int len, int nrounds);
asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
					   unsigned int len, int nrounds);

static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_simd);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx2);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx512vl);
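
/*
 * Convert a byte count into the number of blocks by which to advance the
 * 32-bit block counter in state[12]: cap @len at what one call of the
 * chosen routine handles (@maxblocks blocks), then round up to whole
 * blocks. E.g. len = 150, maxblocks = 4: min(150, 256) = 150, which
 * rounds up to 3 blocks.
 */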
static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
{
	len = min(len, maxblocks * CHACHA_BLOCK_SIZE);
	return round_up(len, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
}
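
/*
 * XOR @bytes of @src into @dst with the widest routine the CPU supports,
 * stepping down from the 8-block AVX-512VL/AVX2 kernels through the 4- and
 * 2-block variants to the single-block SSSE3 routine for the final tail.
 * The caller must hold an FPU region (kernel_fpu_begin()/kernel_fpu_end()).
 */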
static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
			  unsigned int bytes, int nrounds)
{
	if (IS_ENABLED(CONFIG_AS_AVX512) &&
	    static_branch_likely(&chacha_use_avx512vl)) {
		while (bytes >= CHACHA_BLOCK_SIZE * 8) {
			chacha_8block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			bytes -= CHACHA_BLOCK_SIZE * 8;
			src += CHACHA_BLOCK_SIZE * 8;
			dst += CHACHA_BLOCK_SIZE * 8;
			state[12] += 8;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 4) {
			chacha_8block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			state[12] += chacha_advance(bytes, 8);
			return;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 2) {
			chacha_4block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			state[12] += chacha_advance(bytes, 4);
			return;
		}
		if (bytes) {
			chacha_2block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			state[12] += chacha_advance(bytes, 2);
			return;
		}
	}

	if (static_branch_likely(&chacha_use_avx2)) {
		while (bytes >= CHACHA_BLOCK_SIZE * 8) {
			chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
			bytes -= CHACHA_BLOCK_SIZE * 8;
			src += CHACHA_BLOCK_SIZE * 8;
			dst += CHACHA_BLOCK_SIZE * 8;
			state[12] += 8;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 4) {
			chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
			state[12] += chacha_advance(bytes, 8);
			return;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 2) {
			chacha_4block_xor_avx2(state, dst, src, bytes, nrounds);
			state[12] += chacha_advance(bytes, 4);
			return;
		}
		if (bytes > CHACHA_BLOCK_SIZE) {
			chacha_2block_xor_avx2(state, dst, src, bytes, nrounds);
			state[12] += chacha_advance(bytes, 2);
			return;
		}
	}

	while (bytes >= CHACHA_BLOCK_SIZE * 4) {
		chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
		bytes -= CHACHA_BLOCK_SIZE * 4;
		src += CHACHA_BLOCK_SIZE * 4;
		dst += CHACHA_BLOCK_SIZE * 4;
		state[12] += 4;
	}
	if (bytes > CHACHA_BLOCK_SIZE) {
		chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
		state[12] += chacha_advance(bytes, 4);
		return;
	}
	if (bytes) {
		chacha_block_xor_ssse3(state, dst, src, bytes, nrounds);
		state[12]++;
	}
}
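
/*
 * HChaCha is the XChaCha key-derivation step: it condenses the 16-word
 * input state into an 8-word output that becomes the sub-key. Use the
 * SSSE3 routine when the FPU is usable, otherwise fall back to generic C.
 */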
void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
{
	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);

	if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
		hchacha_block_generic(state, stream, nrounds);
	} else {
		kernel_fpu_begin();
		hchacha_block_ssse3(state, stream, nrounds);
		kernel_fpu_end();
	}
}
EXPORT_SYMBOL(hchacha_block_arch);
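
/* State initialization has no SIMD fast path; align and defer to generic. */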
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
{
	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);

	chacha_init_generic(state, key, iv);
}
EXPORT_SYMBOL(chacha_init_arch);
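
/*
 * Library interface for the raw stream XOR. Requests of at most one block,
 * or contexts where the FPU cannot be used, go to the generic code; longer
 * requests run through chacha_dosimd() inside a single FPU region.
 */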
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
		       int nrounds)
{
	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);

	if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
	    bytes <= CHACHA_BLOCK_SIZE)
		return chacha_crypt_generic(state, dst, src, bytes, nrounds);

	kernel_fpu_begin();
	chacha_dosimd(state, dst, src, bytes, nrounds);
	kernel_fpu_end();
}
EXPORT_SYMBOL(chacha_crypt_arch);
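
/*
 * Walk loop shared by the chacha20 and xchacha skcipher handlers: set up
 * the block state from @ctx and @iv, then process the request one walk
 * step at a time, picking SIMD or generic code per step.
 */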
static int chacha_simd_stream_xor(struct skcipher_request *req,
				  const struct chacha_ctx *ctx, const u8 *iv)
{
	u32 *state, state_buf[16 + 2] __aligned(8);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);

	chacha_init_generic(state, ctx->key, iv);

	while (walk.nbytes > 0) {
		unsigned int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		if (!static_branch_likely(&chacha_use_simd) ||
		    !crypto_simd_usable()) {
			chacha_crypt_generic(state, walk.dst.virt.addr,
					     walk.src.virt.addr, nbytes,
					     ctx->nrounds);
		} else {
			kernel_fpu_begin();
			chacha_dosimd(state, walk.dst.virt.addr,
				      walk.src.virt.addr, nbytes,
				      ctx->nrounds);
			kernel_fpu_end();
		}
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}
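
/* ChaCha is an XOR stream cipher, so one handler serves both directions. */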
static int chacha_simd(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);

	return chacha_simd_stream_xor(req, ctx, req->iv);
}
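
/*
 * XChaCha: derive the sub-key from the key and the first 16 IV bytes via
 * HChaCha, then run the ordinary stream XOR keyed with that sub-key and
 * an IV built from the remaining bytes of the 32-byte XChaCha IV.
 */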
static int xchacha_simd(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 *state, state_buf[16 + 2] __aligned(8);
	struct chacha_ctx subctx;
	u8 real_iv[16];

	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
	chacha_init_generic(state, ctx->key, req->iv);

	if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
		kernel_fpu_begin();
		hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
		kernel_fpu_end();
	} else {
		hchacha_block_generic(state, subctx.key, ctx->nrounds);
	}
	subctx.nrounds = ctx->nrounds;

	memcpy(&real_iv[0], req->iv + 24, 8);
	memcpy(&real_iv[8], req->iv + 16, 8);
	return chacha_simd_stream_xor(req, &subctx, real_iv);
}
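
/*
 * skcipher templates exported by this module; priority 300 makes them
 * preferred over the lower-priority generic C implementations.
 */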
static struct skcipher_alg algs[] = {
	{
		.base.cra_name		= "chacha20",
		.base.cra_driver_name	= "chacha20-simd",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= CHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= chacha20_setkey,
		.encrypt		= chacha_simd,
		.decrypt		= chacha_simd,
	}, {
		.base.cra_name		= "xchacha20",
		.base.cra_driver_name	= "xchacha20-simd",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= XCHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= chacha20_setkey,
		.encrypt		= xchacha_simd,
		.decrypt		= xchacha_simd,
	}, {
		.base.cra_name		= "xchacha12",
		.base.cra_driver_name	= "xchacha12-simd",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= XCHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= chacha12_setkey,
		.encrypt		= xchacha_simd,
		.decrypt		= xchacha_simd,
	},
};
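
/*
 * Everything is gated on SSSE3. The AVX2 paths additionally require
 * XSAVE-managed SSE/YMM state, and the AVX-512VL path requires AVX512BW
 * for the kmovq instruction used by the assembly.
 */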
static int __init chacha_simd_mod_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_SSSE3))
		return 0;

	static_branch_enable(&chacha_use_simd);

	if (boot_cpu_has(X86_FEATURE_AVX) &&
	    boot_cpu_has(X86_FEATURE_AVX2) &&
	    cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
		static_branch_enable(&chacha_use_avx2);

		if (IS_ENABLED(CONFIG_AS_AVX512) &&
		    boot_cpu_has(X86_FEATURE_AVX512VL) &&
		    boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */
			static_branch_enable(&chacha_use_avx512vl);
	}
	return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
		crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0;
}

static void __exit chacha_simd_mod_fini(void)
{
	if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) && boot_cpu_has(X86_FEATURE_SSSE3))
		crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}

module_init(chacha_simd_mod_init);
module_exit(chacha_simd_mod_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (x64 SIMD accelerated)");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-simd");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-simd");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-simd");