/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_XOR_32_H
#define _ASM_X86_XOR_32_H
/*
 * Optimized RAID-5 checksumming functions for MMX.
 */
/*
 * High-speed RAID5 checksumming functions utilizing MMX instructions.
 * Copyright (C) 1998 Ingo Molnar.
 */
#define LD(x, y)	" movq 8*("#x")(%1), %%mm"#y" ;\n"
#define ST(x, y)	" movq %%mm"#y", 8*("#x")(%1) ;\n"
#define XO1(x, y)	" pxor 8*("#x")(%2), %%mm"#y" ;\n"
#define XO2(x, y)	" pxor 8*("#x")(%3), %%mm"#y" ;\n"
#define XO3(x, y)	" pxor 8*("#x")(%4), %%mm"#y" ;\n"
#define XO4(x, y)	" pxor 8*("#x")(%5), %%mm"#y" ;\n"
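/*
 * Building blocks for the unrolled pII loops below: LD/ST move the x-th
 * quadword of the destination buffer (%1) into/out of MMX register mm<y>,
 * and XO1..XO4 xor the matching quadword of source buffers %2..%5 into it.
 */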
#include <asm/fpu/api.h>
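/*
 * <asm/fpu/api.h> provides kernel_fpu_begin()/kernel_fpu_end(); the MMX
 * register file used below is only safe to touch between those calls.
 */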
static void
xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long lines = bytes >> 7;
static void
xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	      unsigned long *p3)
{
	unsigned long lines = bytes >> 7;
108 "+r" (p1
), "+r" (p2
), "+r" (p3
)
static void
xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	      unsigned long *p3, unsigned long *p4)
{
	unsigned long lines = bytes >> 7;
162 "+r" (p1
), "+r" (p2
), "+r" (p3
), "+r" (p4
)
static void
xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	      unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned long lines = bytes >> 7;
	/* Make sure GCC forgets anything it knows about p4 or p5,
	   such that it won't pass to the asm volatile below a
	   register that is shared with any other variable.  That's
	   because we modify p4 and p5 there, but we can't mark them
	   as read/write, otherwise we'd overflow the 10-asm-operands
	   limit of GCC < 3.1.  */
	asm("" : "+r" (p4), "+r" (p5));
230 "+r" (p1
), "+r" (p2
), "+r" (p3
)
	/* p4 and p5 were modified, and now the variables are dead.
	   Clobber them just to be sure nobody does something stupid
	   like assuming they have some legal value.  */
	asm("" : "=r" (p4), "=r" (p5));
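/*
 * The p5 variants below work on 64-byte chunks per iteration (bytes >> 6)
 * and interleave the movq/pxor/movq sequences by hand rather than using
 * the LD/ST/XO macros of the 128-byte-stride pII versions above.
 */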
static void
xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long lines = bytes >> 6;
260 " movq (%1), %%mm0 ;\n"
261 " movq 8(%1), %%mm1 ;\n"
262 " pxor (%2), %%mm0 ;\n"
263 " movq 16(%1), %%mm2 ;\n"
264 " movq %%mm0, (%1) ;\n"
265 " pxor 8(%2), %%mm1 ;\n"
266 " movq 24(%1), %%mm3 ;\n"
267 " movq %%mm1, 8(%1) ;\n"
268 " pxor 16(%2), %%mm2 ;\n"
269 " movq 32(%1), %%mm4 ;\n"
270 " movq %%mm2, 16(%1) ;\n"
271 " pxor 24(%2), %%mm3 ;\n"
272 " movq 40(%1), %%mm5 ;\n"
273 " movq %%mm3, 24(%1) ;\n"
274 " pxor 32(%2), %%mm4 ;\n"
275 " movq 48(%1), %%mm6 ;\n"
276 " movq %%mm4, 32(%1) ;\n"
277 " pxor 40(%2), %%mm5 ;\n"
278 " movq 56(%1), %%mm7 ;\n"
279 " movq %%mm5, 40(%1) ;\n"
280 " pxor 48(%2), %%mm6 ;\n"
281 " pxor 56(%2), %%mm7 ;\n"
282 " movq %%mm6, 48(%1) ;\n"
283 " movq %%mm7, 56(%1) ;\n"
static void
xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	     unsigned long *p3)
{
	unsigned long lines = bytes >> 6;
306 " .align 32,0x90 ;\n"
308 " movq (%1), %%mm0 ;\n"
309 " movq 8(%1), %%mm1 ;\n"
310 " pxor (%2), %%mm0 ;\n"
311 " movq 16(%1), %%mm2 ;\n"
312 " pxor 8(%2), %%mm1 ;\n"
313 " pxor (%3), %%mm0 ;\n"
314 " pxor 16(%2), %%mm2 ;\n"
315 " movq %%mm0, (%1) ;\n"
316 " pxor 8(%3), %%mm1 ;\n"
317 " pxor 16(%3), %%mm2 ;\n"
318 " movq 24(%1), %%mm3 ;\n"
319 " movq %%mm1, 8(%1) ;\n"
320 " movq 32(%1), %%mm4 ;\n"
321 " movq 40(%1), %%mm5 ;\n"
322 " pxor 24(%2), %%mm3 ;\n"
323 " movq %%mm2, 16(%1) ;\n"
324 " pxor 32(%2), %%mm4 ;\n"
325 " pxor 24(%3), %%mm3 ;\n"
326 " pxor 40(%2), %%mm5 ;\n"
327 " movq %%mm3, 24(%1) ;\n"
328 " pxor 32(%3), %%mm4 ;\n"
329 " pxor 40(%3), %%mm5 ;\n"
330 " movq 48(%1), %%mm6 ;\n"
331 " movq %%mm4, 32(%1) ;\n"
332 " movq 56(%1), %%mm7 ;\n"
333 " pxor 48(%2), %%mm6 ;\n"
334 " movq %%mm5, 40(%1) ;\n"
335 " pxor 56(%2), %%mm7 ;\n"
336 " pxor 48(%3), %%mm6 ;\n"
337 " pxor 56(%3), %%mm7 ;\n"
338 " movq %%mm6, 48(%1) ;\n"
339 " movq %%mm7, 56(%1) ;\n"
347 "+r" (p1
), "+r" (p2
), "+r" (p3
)
static void
xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	     unsigned long *p3, unsigned long *p4)
{
	unsigned long lines = bytes >> 6;
363 " .align 32,0x90 ;\n"
365 " movq (%1), %%mm0 ;\n"
366 " movq 8(%1), %%mm1 ;\n"
367 " pxor (%2), %%mm0 ;\n"
368 " movq 16(%1), %%mm2 ;\n"
369 " pxor 8(%2), %%mm1 ;\n"
370 " pxor (%3), %%mm0 ;\n"
371 " pxor 16(%2), %%mm2 ;\n"
372 " pxor 8(%3), %%mm1 ;\n"
373 " pxor (%4), %%mm0 ;\n"
374 " movq 24(%1), %%mm3 ;\n"
375 " pxor 16(%3), %%mm2 ;\n"
376 " pxor 8(%4), %%mm1 ;\n"
377 " movq %%mm0, (%1) ;\n"
378 " movq 32(%1), %%mm4 ;\n"
379 " pxor 24(%2), %%mm3 ;\n"
380 " pxor 16(%4), %%mm2 ;\n"
381 " movq %%mm1, 8(%1) ;\n"
382 " movq 40(%1), %%mm5 ;\n"
383 " pxor 32(%2), %%mm4 ;\n"
384 " pxor 24(%3), %%mm3 ;\n"
385 " movq %%mm2, 16(%1) ;\n"
386 " pxor 40(%2), %%mm5 ;\n"
387 " pxor 32(%3), %%mm4 ;\n"
388 " pxor 24(%4), %%mm3 ;\n"
389 " movq %%mm3, 24(%1) ;\n"
390 " movq 56(%1), %%mm7 ;\n"
391 " movq 48(%1), %%mm6 ;\n"
392 " pxor 40(%3), %%mm5 ;\n"
393 " pxor 32(%4), %%mm4 ;\n"
394 " pxor 48(%2), %%mm6 ;\n"
395 " movq %%mm4, 32(%1) ;\n"
396 " pxor 56(%2), %%mm7 ;\n"
397 " pxor 40(%4), %%mm5 ;\n"
398 " pxor 48(%3), %%mm6 ;\n"
399 " pxor 56(%3), %%mm7 ;\n"
400 " movq %%mm5, 40(%1) ;\n"
401 " pxor 48(%4), %%mm6 ;\n"
402 " pxor 56(%4), %%mm7 ;\n"
403 " movq %%mm6, 48(%1) ;\n"
404 " movq %%mm7, 56(%1) ;\n"
413 "+r" (p1
), "+r" (p2
), "+r" (p3
), "+r" (p4
)
static void
xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	     unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned long lines = bytes >> 6;
	/* Make sure GCC forgets anything it knows about p4 or p5,
	   such that it won't pass to the asm volatile below a
	   register that is shared with any other variable.  That's
	   because we modify p4 and p5 there, but we can't mark them
	   as read/write, otherwise we'd overflow the 10-asm-operands
	   limit of GCC < 3.1.  */
	asm("" : "+r" (p4), "+r" (p5));
437 " .align 32,0x90 ;\n"
439 " movq (%1), %%mm0 ;\n"
440 " movq 8(%1), %%mm1 ;\n"
441 " pxor (%2), %%mm0 ;\n"
442 " pxor 8(%2), %%mm1 ;\n"
443 " movq 16(%1), %%mm2 ;\n"
444 " pxor (%3), %%mm0 ;\n"
445 " pxor 8(%3), %%mm1 ;\n"
446 " pxor 16(%2), %%mm2 ;\n"
447 " pxor (%4), %%mm0 ;\n"
448 " pxor 8(%4), %%mm1 ;\n"
449 " pxor 16(%3), %%mm2 ;\n"
450 " movq 24(%1), %%mm3 ;\n"
451 " pxor (%5), %%mm0 ;\n"
452 " pxor 8(%5), %%mm1 ;\n"
453 " movq %%mm0, (%1) ;\n"
454 " pxor 16(%4), %%mm2 ;\n"
455 " pxor 24(%2), %%mm3 ;\n"
456 " movq %%mm1, 8(%1) ;\n"
457 " pxor 16(%5), %%mm2 ;\n"
458 " pxor 24(%3), %%mm3 ;\n"
459 " movq 32(%1), %%mm4 ;\n"
460 " movq %%mm2, 16(%1) ;\n"
461 " pxor 24(%4), %%mm3 ;\n"
462 " pxor 32(%2), %%mm4 ;\n"
463 " movq 40(%1), %%mm5 ;\n"
464 " pxor 24(%5), %%mm3 ;\n"
465 " pxor 32(%3), %%mm4 ;\n"
466 " pxor 40(%2), %%mm5 ;\n"
467 " movq %%mm3, 24(%1) ;\n"
468 " pxor 32(%4), %%mm4 ;\n"
469 " pxor 40(%3), %%mm5 ;\n"
470 " movq 48(%1), %%mm6 ;\n"
471 " movq 56(%1), %%mm7 ;\n"
472 " pxor 32(%5), %%mm4 ;\n"
473 " pxor 40(%4), %%mm5 ;\n"
474 " pxor 48(%2), %%mm6 ;\n"
475 " pxor 56(%2), %%mm7 ;\n"
476 " movq %%mm4, 32(%1) ;\n"
477 " pxor 48(%3), %%mm6 ;\n"
478 " pxor 56(%3), %%mm7 ;\n"
479 " pxor 40(%5), %%mm5 ;\n"
480 " pxor 48(%4), %%mm6 ;\n"
481 " pxor 56(%4), %%mm7 ;\n"
482 " movq %%mm5, 40(%1) ;\n"
483 " pxor 48(%5), %%mm6 ;\n"
484 " pxor 56(%5), %%mm7 ;\n"
485 " movq %%mm6, 48(%1) ;\n"
486 " movq %%mm7, 56(%1) ;\n"
496 "+r" (p1
), "+r" (p2
), "+r" (p3
)
	/* p4 and p5 were modified, and now the variables are dead.
	   Clobber them just to be sure nobody does something stupid
	   like assuming they have some legal value.  */
	asm("" : "=r" (p4), "=r" (p5));
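/*
 * Each xor_block_template groups the 2..5-source routines of one
 * implementation; xor_speed() in XOR_TRY_TEMPLATES below times the
 * registered templates so that the fastest one can be selected.
 */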
static struct xor_block_template xor_block_pII_mmx = {
	.do_2 = xor_pII_mmx_2,
	.do_3 = xor_pII_mmx_3,
	.do_4 = xor_pII_mmx_4,
	.do_5 = xor_pII_mmx_5,
};
static struct xor_block_template xor_block_p5_mmx = {
	.do_2 = xor_p5_mmx_2,
	.do_3 = xor_p5_mmx_3,
	.do_4 = xor_p5_mmx_4,
	.do_5 = xor_p5_mmx_5,
};
static struct xor_block_template xor_block_pIII_sse = {
/* Also try the AVX routines */
#include <asm/xor_avx.h>

/* Also try the generic routines.  */
#include <asm-generic/xor.h>
/* We force the use of the SSE xor block because it can write around L2.
   We may also be able to load into the L1 only depending on how the cpu
   deals with a load to a line that is being prefetched.  */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES				\
do {							\
	AVX_XOR_SPEED;					\
	if (boot_cpu_has(X86_FEATURE_XMM)) {		\
		xor_speed(&xor_block_pIII_sse);		\
		xor_speed(&xor_block_sse_pf64);		\
	} else if (boot_cpu_has(X86_FEATURE_MMX)) {	\
		xor_speed(&xor_block_pII_mmx);		\
		xor_speed(&xor_block_p5_mmx);		\
	} else {					\
		xor_speed(&xor_block_8regs);		\
		xor_speed(&xor_block_8regs_p);		\
		xor_speed(&xor_block_32regs);		\
		xor_speed(&xor_block_32regs_p);		\
	}						\
} while (0)
#endif /* _ASM_X86_XOR_32_H */