/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_X86_XOR_AVX_H
#define _ASM_X86_XOR_AVX_H

/*
 * Optimized RAID-5 checksumming functions for AVX
 *
 * Copyright (C) 2012 Intel Corporation
 * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
 *
 * Based on Ingo Molnar and Zach Brown's respective MMX and SSE routines
 */

#include <linux/compiler.h>
#include <asm/fpu/api.h>

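/*
 * BLOCK(off, reg) is redefined inside each xor_avx_N() below to process one
 * 32-byte ymm register at byte offset 'off'; BLOCK4() and BLOCK16() unroll
 * it so that a single BLOCK16() covers 512 bytes using ymm0-ymm3.
 */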
#define BLOCK4(i) \
                BLOCK(32 * i, 0) \
                BLOCK(32 * (i + 1), 1) \
                BLOCK(32 * (i + 2), 2) \
                BLOCK(32 * (i + 3), 3)

#define BLOCK16() \
                BLOCK4(0) \
                BLOCK4(4) \
                BLOCK4(8) \
                BLOCK4(12)

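/*
 * Each xor_avx_N() XORs the other N-1 source buffers into p0, 512 bytes per
 * loop iteration, with the ymm register state guarded by
 * kernel_fpu_begin()/kernel_fpu_end().
 */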
static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1)
{
        unsigned long lines = bytes >> 9;

        kernel_fpu_begin();

        while (lines--) {
#undef BLOCK
#define BLOCK(i, reg) \
do { \
        asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p1[i / sizeof(*p1)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p0[i / sizeof(*p0)])); \
        asm volatile("vmovdqa %%ymm" #reg ", %0" : \
                "=m" (p0[i / sizeof(*p0)])); \
} while (0);

                BLOCK16()

                p0 = (unsigned long *)((uintptr_t)p0 + 512);
                p1 = (unsigned long *)((uintptr_t)p1 + 512);
        }

        kernel_fpu_end();
}

static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1,
        unsigned long *p2)
{
        unsigned long lines = bytes >> 9;

        kernel_fpu_begin();

        while (lines--) {
#undef BLOCK
#define BLOCK(i, reg) \
do { \
        asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p2[i / sizeof(*p2)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p1[i / sizeof(*p1)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p0[i / sizeof(*p0)])); \
        asm volatile("vmovdqa %%ymm" #reg ", %0" : \
                "=m" (p0[i / sizeof(*p0)])); \
} while (0);

                BLOCK16()

                p0 = (unsigned long *)((uintptr_t)p0 + 512);
                p1 = (unsigned long *)((uintptr_t)p1 + 512);
                p2 = (unsigned long *)((uintptr_t)p2 + 512);
        }

        kernel_fpu_end();
}

static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1,
        unsigned long *p2, unsigned long *p3)
{
        unsigned long lines = bytes >> 9;

        kernel_fpu_begin();

        while (lines--) {
#undef BLOCK
#define BLOCK(i, reg) \
do { \
        asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p3[i / sizeof(*p3)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p2[i / sizeof(*p2)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p1[i / sizeof(*p1)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p0[i / sizeof(*p0)])); \
        asm volatile("vmovdqa %%ymm" #reg ", %0" : \
                "=m" (p0[i / sizeof(*p0)])); \
} while (0);

                BLOCK16();

                p0 = (unsigned long *)((uintptr_t)p0 + 512);
                p1 = (unsigned long *)((uintptr_t)p1 + 512);
                p2 = (unsigned long *)((uintptr_t)p2 + 512);
                p3 = (unsigned long *)((uintptr_t)p3 + 512);
        }

        kernel_fpu_end();
}

static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1,
        unsigned long *p2, unsigned long *p3, unsigned long *p4)
{
        unsigned long lines = bytes >> 9;

        kernel_fpu_begin();

        while (lines--) {
#undef BLOCK
#define BLOCK(i, reg) \
do { \
        asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p4[i / sizeof(*p4)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p3[i / sizeof(*p3)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p2[i / sizeof(*p2)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p1[i / sizeof(*p1)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p0[i / sizeof(*p0)])); \
        asm volatile("vmovdqa %%ymm" #reg ", %0" : \
                "=m" (p0[i / sizeof(*p0)])); \
} while (0);

                BLOCK16()

                p0 = (unsigned long *)((uintptr_t)p0 + 512);
                p1 = (unsigned long *)((uintptr_t)p1 + 512);
                p2 = (unsigned long *)((uintptr_t)p2 + 512);
                p3 = (unsigned long *)((uintptr_t)p3 + 512);
                p4 = (unsigned long *)((uintptr_t)p4 + 512);
        }

        kernel_fpu_end();
}

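/*
 * Template handing these routines to the kernel's xor benchmarking and
 * selection code (xor_speed()/xor_blocks()).
 */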
static struct xor_block_template xor_block_avx = {
        .name = "avx",
        .do_2 = xor_avx_2,
        .do_3 = xor_avx_3,
        .do_4 = xor_avx_4,
        .do_5 = xor_avx_5,
};

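/*
 * Benchmark the AVX routines only when the CPU supports AVX and the OS has
 * enabled the extended ymm state (OSXSAVE).
 */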
#define AVX_XOR_SPEED \
do { \
        if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_OSXSAVE)) \
                xor_speed(&xor_block_avx); \
} while (0)

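/* Prefer the AVX template over the otherwise-fastest one whenever AVX is usable. */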
#define AVX_SELECT(FASTEST) \
        (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_OSXSAVE) ? &xor_block_avx : FASTEST)

#endif