// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Intel Corporation
 */

#ifdef CONFIG_AS_SSSE3

#include <linux/raid/pq.h>
#include "x86.h"

static int raid6_has_ssse3(void)
{
	return boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2) &&
		boot_cpu_has(X86_FEATURE_SSSE3);
}
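
/*
 * SSSE3 matters here because the recovery loops below use pshufb as a
 * sixteen-way parallel GF(2^8) nibble-table lookup; plain SSE/SSE2 has no
 * byte shuffle, so without it this implementation simply is not selected.
 */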

static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
		int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	static const u8 __aligned(16) x0f[16] = {
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	   Use the dead data pages as temporary storage for
	   delta p and delta q */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);
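
	/*
	 * With the failed slots pointing at the zero page and the P/Q slots
	 * redirected to dp/dq, the syndrome pass above leaves dp = Pxy and
	 * dq = Qxy: the P and Q the array would have if both failed blocks
	 * were zero.
	 */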

	/* Restore pointer table */
	ptrs[faila]   = dp;
	ptrs[failb]   = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
		raid6_gfexp[failb]]];
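
	/*
	 * Roughly, with A = faila and B = failb, the classic two-failure
	 * reconstruction (see the generic lib/raid6/recov.c) is
	 *
	 *	DB = (P ^ Pxy) * inv(g^(B-A) ^ 1)  ^  (Q ^ Qxy) * inv(g^A ^ g^B)
	 *	DA = (P ^ Pxy) ^ DB
	 *
	 * where g is the GF(2^8) generator {02}.  pbmul and qmul above are
	 * the 32-byte nibble lookup tables for those two constant multipliers.
	 */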

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));

#ifdef CONFIG_X86_64
	asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
	asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
	asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
#endif

	/* Now do it... */
	while (bytes) {
#ifdef CONFIG_X86_64
		/* xmm6, xmm14, xmm15 */
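
		/*
		 * The CONFIG_X86_64 path keeps the low-nibble qmul table in
		 * xmm6 and both pbmul tables in xmm14/xmm15, and interleaves
		 * two 16-byte chunks per iteration, since xmm8-xmm15 only
		 * exist in 64-bit mode; the #else path below handles one
		 * chunk at a time with xmm0-xmm7.
		 */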

		asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
		asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
		asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
		asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
		asm volatile("pxor %0,%%xmm1" : : "m" (dq[0]));
		asm volatile("pxor %0,%%xmm9" : : "m" (dq[16]));
		asm volatile("pxor %0,%%xmm0" : : "m" (dp[0]));
		asm volatile("pxor %0,%%xmm8" : : "m" (dp[16]));

		asm volatile("movdqa %xmm6,%xmm4");
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
		asm volatile("movdqa %xmm6,%xmm12");
		asm volatile("movdqa %xmm5,%xmm13");
		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("movdqa %xmm9,%xmm11");
		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
		asm volatile("movdqa %xmm8,%xmm10");
		asm volatile("psraw $4,%xmm1");
		asm volatile("psraw $4,%xmm9");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm11");
		asm volatile("pand %xmm7,%xmm1");
		asm volatile("pand %xmm7,%xmm9");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pshufb %xmm9,%xmm13");
		asm volatile("pxor %xmm4,%xmm5");
		asm volatile("pxor %xmm12,%xmm13");
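
		/*
		 * xmm5/xmm13 now hold qmul[Q ^ Qxy].  Each multiply by a
		 * GF(2^8) constant is done nibble-wise: pand with x0f keeps
		 * the low nibbles, psraw $4 plus pand extracts the high
		 * nibbles, and pshufb uses them to index the 16-entry
		 * low/high product tables, which are combined with pxor.
		 * Per byte b this is roughly
		 * qmul[b & 0x0f] ^ qmul[16 + (b >> 4)].
		 */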

		asm volatile("movdqa %xmm14,%xmm4");
		asm volatile("movdqa %xmm15,%xmm1");
		asm volatile("movdqa %xmm14,%xmm12");
		asm volatile("movdqa %xmm15,%xmm9");
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("movdqa %xmm10,%xmm11");
		asm volatile("psraw $4,%xmm2");
		asm volatile("psraw $4,%xmm10");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm11");
		asm volatile("pand %xmm7,%xmm2");
		asm volatile("pand %xmm7,%xmm10");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pshufb %xmm10,%xmm9");
		asm volatile("pxor %xmm4,%xmm1");
		asm volatile("pxor %xmm12,%xmm9");

		/* xmm1/9 = pbmul[px] */
		asm volatile("pxor %xmm5,%xmm1");
		asm volatile("pxor %xmm13,%xmm9");
		/* xmm1/9 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));

		asm volatile("pxor %xmm1,%xmm0");
		asm volatile("pxor %xmm9,%xmm8");
		asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
		asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dp += 32;
		dq += 32;
#else
		asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
		asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
		asm volatile("pxor %0,%%xmm1" : : "m" (*dq));
		asm volatile("pxor %0,%%xmm0" : : "m" (*dp));

		asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));

		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("psraw $4,%xmm1");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm1");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pxor %xmm4,%xmm5");

		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */

		asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
		asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("psraw $4,%xmm2");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm2");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pxor %xmm4,%xmm1");

		/* xmm1 = pbmul[px] */
		asm volatile("pxor %xmm5,%xmm1");
		/* xmm1 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));

		asm volatile("pxor %xmm1,%xmm0");
		asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));

		bytes -= 16;
		p += 16;
		q += 16;
		dp += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}


static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
		void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	static const u8 __aligned(16) x0f[16] = {
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data page
	   Use the dead data page as temporary storage for delta q */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila]   = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
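
	/*
	 * One lost data block recovered through Q, as in the generic
	 * lib/raid6/recov.c: with the failed block zeroed, dq ends up
	 * holding Qx, so the lost data is D = (Q ^ Qx) * inv(g^faila),
	 * and P (also regenerated without D by the syndrome pass) is
	 * repaired as P ^= D.  qmul above is the nibble lookup table for
	 * that inverse factor.
	 */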

	kernel_fpu_begin();

	asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));

	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));

		/* xmm3 = q[0] ^ dq[0] */

		asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm4 = q[16] ^ dq[16] */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %xmm4, %xmm8");

		/* xmm4 = xmm8 = q[16] ^ dq[16] */

		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0]));
		asm volatile("pxor %xmm0, %xmm1");
		asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16]));

		/* xmm1 = qmul[q[0] ^ dq[0]] */

		asm volatile("psraw $4, %xmm4");
		asm volatile("pand %xmm7, %xmm8");
		asm volatile("pand %xmm7, %xmm4");
		asm volatile("pshufb %xmm8, %xmm10");
		asm volatile("pshufb %xmm4, %xmm11");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("pxor %xmm10, %xmm11");
		asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));

		/* xmm11 = qmul[q[16] ^ dq[16]] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */

		asm volatile("pxor %xmm11, %xmm12");

		/* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16]));

		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
		asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dq += 32;

#else
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm3 = *q ^ *dq */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("pxor %xmm0, %xmm1");

		/* xmm1 = qmul[*q ^ *dq] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = *p ^ qmul[*q ^ *dq] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));

		bytes -= 16;
		p += 16;
		q += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}
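
/*
 * Assuming the usual lib/raid6 setup, the core code walks the registered
 * raid6_recov_calls entries at init and prefers the highest-priority entry
 * whose ->valid() check passes; this table is what makes the SSSE3 routines
 * available to it.
 */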
const struct raid6_recov_calls raid6_recov_ssse3 = {
	.data2 = raid6_2data_recov_ssse3,
	.datap = raid6_datap_recov_ssse3,
	.valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
	.name = "ssse3x2",
#else
	.name = "ssse3x1",
#endif
	.priority = 1,
};

#else
#warning "your version of binutils lacks SSSE3 support"
#endif