/*
 * Optimized RAID-5 checksumming functions for MMX and SSE.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * High-speed RAID5 checksumming functions utilizing MMX instructions.
 * Copyright (C) 1998 Ingo Molnar.
 */
#define LD(x,y)  " movq 8*("#x")(%1), %%mm"#y" ;\n"
#define ST(x,y)  " movq %%mm"#y", 8*("#x")(%1) ;\n"
#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"
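
/*
 * Illustrative reference (a sketch added for clarity, not part of the
 * original file): the unrolled MMX/SSE blocks built from macros like the
 * ones above all compute the same thing -- an in-place XOR of the source
 * buffers into p1, one block per loop iteration (128 bytes for the pII
 * variants, 64 for the p5 variants, 256 for the SSE ones).  A scalar C
 * equivalent of the two-source case, using the hypothetical name
 * xor_ref_2, would look roughly like this:
 */
#if 0
static void
xor_ref_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long i;

	/* XOR p2 into p1, word by word. */
	for (i = 0; i < bytes / sizeof(unsigned long); i++)
		p1[i] ^= p2[i];
}
#endif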
static void
xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long lines = bytes >> 7;

	__asm__ __volatile__ (
static void
xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	      unsigned long *p3)
{
	unsigned long lines = bytes >> 7;

	__asm__ __volatile__ (
113 "+r" (p1
), "+r" (p2
), "+r" (p3
)
static void
xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	      unsigned long *p3, unsigned long *p4)
{
	unsigned long lines = bytes >> 7;

	__asm__ __volatile__ (
167 "+r" (p1
), "+r" (p2
), "+r" (p3
), "+r" (p4
)
static void
xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	      unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned long lines = bytes >> 7;
	/* Make sure GCC forgets anything it knows about p4 or p5,
	   such that it won't pass to the asm volatile below a
	   register that is shared with any other variable.  That's
	   because we modify p4 and p5 there, but we can't mark them
	   as read/write, otherwise we'd overflow the 10-asm-operands
	   limit of GCC < 3.1.  */
	__asm__ ("" : "+r" (p4), "+r" (p5));
	__asm__ __volatile__ (
235 "+r" (p1
), "+r" (p2
), "+r" (p3
)
	/* p4 and p5 were modified, and now the variables are dead.
	   Clobber them just to be sure nobody does something stupid
	   like assuming they have some legal value.  */
	__asm__ ("" : "=r" (p4), "=r" (p5));
static void
xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long lines = bytes >> 6;

	__asm__ __volatile__ (
265 " movq (%1), %%mm0 ;\n"
266 " movq 8(%1), %%mm1 ;\n"
267 " pxor (%2), %%mm0 ;\n"
268 " movq 16(%1), %%mm2 ;\n"
269 " movq %%mm0, (%1) ;\n"
270 " pxor 8(%2), %%mm1 ;\n"
271 " movq 24(%1), %%mm3 ;\n"
272 " movq %%mm1, 8(%1) ;\n"
273 " pxor 16(%2), %%mm2 ;\n"
274 " movq 32(%1), %%mm4 ;\n"
275 " movq %%mm2, 16(%1) ;\n"
276 " pxor 24(%2), %%mm3 ;\n"
277 " movq 40(%1), %%mm5 ;\n"
278 " movq %%mm3, 24(%1) ;\n"
279 " pxor 32(%2), %%mm4 ;\n"
280 " movq 48(%1), %%mm6 ;\n"
281 " movq %%mm4, 32(%1) ;\n"
282 " pxor 40(%2), %%mm5 ;\n"
283 " movq 56(%1), %%mm7 ;\n"
284 " movq %%mm5, 40(%1) ;\n"
285 " pxor 48(%2), %%mm6 ;\n"
286 " pxor 56(%2), %%mm7 ;\n"
287 " movq %%mm6, 48(%1) ;\n"
288 " movq %%mm7, 56(%1) ;\n"
static void
xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	     unsigned long *p3)
{
	unsigned long lines = bytes >> 6;

	__asm__ __volatile__ (
311 " .align 32,0x90 ;\n"
313 " movq (%1), %%mm0 ;\n"
314 " movq 8(%1), %%mm1 ;\n"
315 " pxor (%2), %%mm0 ;\n"
316 " movq 16(%1), %%mm2 ;\n"
317 " pxor 8(%2), %%mm1 ;\n"
318 " pxor (%3), %%mm0 ;\n"
319 " pxor 16(%2), %%mm2 ;\n"
320 " movq %%mm0, (%1) ;\n"
321 " pxor 8(%3), %%mm1 ;\n"
322 " pxor 16(%3), %%mm2 ;\n"
323 " movq 24(%1), %%mm3 ;\n"
324 " movq %%mm1, 8(%1) ;\n"
325 " movq 32(%1), %%mm4 ;\n"
326 " movq 40(%1), %%mm5 ;\n"
327 " pxor 24(%2), %%mm3 ;\n"
328 " movq %%mm2, 16(%1) ;\n"
329 " pxor 32(%2), %%mm4 ;\n"
330 " pxor 24(%3), %%mm3 ;\n"
331 " pxor 40(%2), %%mm5 ;\n"
332 " movq %%mm3, 24(%1) ;\n"
333 " pxor 32(%3), %%mm4 ;\n"
334 " pxor 40(%3), %%mm5 ;\n"
335 " movq 48(%1), %%mm6 ;\n"
336 " movq %%mm4, 32(%1) ;\n"
337 " movq 56(%1), %%mm7 ;\n"
338 " pxor 48(%2), %%mm6 ;\n"
339 " movq %%mm5, 40(%1) ;\n"
340 " pxor 56(%2), %%mm7 ;\n"
341 " pxor 48(%3), %%mm6 ;\n"
342 " pxor 56(%3), %%mm7 ;\n"
343 " movq %%mm6, 48(%1) ;\n"
344 " movq %%mm7, 56(%1) ;\n"
352 "+r" (p1
), "+r" (p2
), "+r" (p3
)
static void
xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	     unsigned long *p3, unsigned long *p4)
{
	unsigned long lines = bytes >> 6;

	__asm__ __volatile__ (
368 " .align 32,0x90 ;\n"
370 " movq (%1), %%mm0 ;\n"
371 " movq 8(%1), %%mm1 ;\n"
372 " pxor (%2), %%mm0 ;\n"
373 " movq 16(%1), %%mm2 ;\n"
374 " pxor 8(%2), %%mm1 ;\n"
375 " pxor (%3), %%mm0 ;\n"
376 " pxor 16(%2), %%mm2 ;\n"
377 " pxor 8(%3), %%mm1 ;\n"
378 " pxor (%4), %%mm0 ;\n"
379 " movq 24(%1), %%mm3 ;\n"
380 " pxor 16(%3), %%mm2 ;\n"
381 " pxor 8(%4), %%mm1 ;\n"
382 " movq %%mm0, (%1) ;\n"
383 " movq 32(%1), %%mm4 ;\n"
384 " pxor 24(%2), %%mm3 ;\n"
385 " pxor 16(%4), %%mm2 ;\n"
386 " movq %%mm1, 8(%1) ;\n"
387 " movq 40(%1), %%mm5 ;\n"
388 " pxor 32(%2), %%mm4 ;\n"
389 " pxor 24(%3), %%mm3 ;\n"
390 " movq %%mm2, 16(%1) ;\n"
391 " pxor 40(%2), %%mm5 ;\n"
392 " pxor 32(%3), %%mm4 ;\n"
393 " pxor 24(%4), %%mm3 ;\n"
394 " movq %%mm3, 24(%1) ;\n"
395 " movq 56(%1), %%mm7 ;\n"
396 " movq 48(%1), %%mm6 ;\n"
397 " pxor 40(%3), %%mm5 ;\n"
398 " pxor 32(%4), %%mm4 ;\n"
399 " pxor 48(%2), %%mm6 ;\n"
400 " movq %%mm4, 32(%1) ;\n"
401 " pxor 56(%2), %%mm7 ;\n"
402 " pxor 40(%4), %%mm5 ;\n"
403 " pxor 48(%3), %%mm6 ;\n"
404 " pxor 56(%3), %%mm7 ;\n"
405 " movq %%mm5, 40(%1) ;\n"
406 " pxor 48(%4), %%mm6 ;\n"
407 " pxor 56(%4), %%mm7 ;\n"
408 " movq %%mm6, 48(%1) ;\n"
409 " movq %%mm7, 56(%1) ;\n"
418 "+r" (p1
), "+r" (p2
), "+r" (p3
), "+r" (p4
)
static void
xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	     unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned long lines = bytes >> 6;
	/* Make sure GCC forgets anything it knows about p4 or p5,
	   such that it won't pass to the asm volatile below a
	   register that is shared with any other variable.  That's
	   because we modify p4 and p5 there, but we can't mark them
	   as read/write, otherwise we'd overflow the 10-asm-operands
	   limit of GCC < 3.1.  */
	__asm__ ("" : "+r" (p4), "+r" (p5));
	__asm__ __volatile__ (
442 " .align 32,0x90 ;\n"
444 " movq (%1), %%mm0 ;\n"
445 " movq 8(%1), %%mm1 ;\n"
446 " pxor (%2), %%mm0 ;\n"
447 " pxor 8(%2), %%mm1 ;\n"
448 " movq 16(%1), %%mm2 ;\n"
449 " pxor (%3), %%mm0 ;\n"
450 " pxor 8(%3), %%mm1 ;\n"
451 " pxor 16(%2), %%mm2 ;\n"
452 " pxor (%4), %%mm0 ;\n"
453 " pxor 8(%4), %%mm1 ;\n"
454 " pxor 16(%3), %%mm2 ;\n"
455 " movq 24(%1), %%mm3 ;\n"
456 " pxor (%5), %%mm0 ;\n"
457 " pxor 8(%5), %%mm1 ;\n"
458 " movq %%mm0, (%1) ;\n"
459 " pxor 16(%4), %%mm2 ;\n"
460 " pxor 24(%2), %%mm3 ;\n"
461 " movq %%mm1, 8(%1) ;\n"
462 " pxor 16(%5), %%mm2 ;\n"
463 " pxor 24(%3), %%mm3 ;\n"
464 " movq 32(%1), %%mm4 ;\n"
465 " movq %%mm2, 16(%1) ;\n"
466 " pxor 24(%4), %%mm3 ;\n"
467 " pxor 32(%2), %%mm4 ;\n"
468 " movq 40(%1), %%mm5 ;\n"
469 " pxor 24(%5), %%mm3 ;\n"
470 " pxor 32(%3), %%mm4 ;\n"
471 " pxor 40(%2), %%mm5 ;\n"
472 " movq %%mm3, 24(%1) ;\n"
473 " pxor 32(%4), %%mm4 ;\n"
474 " pxor 40(%3), %%mm5 ;\n"
475 " movq 48(%1), %%mm6 ;\n"
476 " movq 56(%1), %%mm7 ;\n"
477 " pxor 32(%5), %%mm4 ;\n"
478 " pxor 40(%4), %%mm5 ;\n"
479 " pxor 48(%2), %%mm6 ;\n"
480 " pxor 56(%2), %%mm7 ;\n"
481 " movq %%mm4, 32(%1) ;\n"
482 " pxor 48(%3), %%mm6 ;\n"
483 " pxor 56(%3), %%mm7 ;\n"
484 " pxor 40(%5), %%mm5 ;\n"
485 " pxor 48(%4), %%mm6 ;\n"
486 " pxor 56(%4), %%mm7 ;\n"
487 " movq %%mm5, 40(%1) ;\n"
488 " pxor 48(%5), %%mm6 ;\n"
489 " pxor 56(%5), %%mm7 ;\n"
490 " movq %%mm6, 48(%1) ;\n"
491 " movq %%mm7, 56(%1) ;\n"
501 "+r" (p1
), "+r" (p2
), "+r" (p3
)
	/* p4 and p5 were modified, and now the variables are dead.
	   Clobber them just to be sure nobody does something stupid
	   like assuming they have some legal value.  */
	__asm__ ("" : "=r" (p4), "=r" (p5));
static struct xor_block_template xor_block_pII_mmx = {
	.do_2 = xor_pII_mmx_2,
	.do_3 = xor_pII_mmx_3,
	.do_4 = xor_pII_mmx_4,
	.do_5 = xor_pII_mmx_5,
};
static struct xor_block_template xor_block_p5_mmx = {
	.do_2 = xor_p5_mmx_2,
	.do_3 = xor_p5_mmx_3,
	.do_4 = xor_p5_mmx_4,
	.do_5 = xor_p5_mmx_5,
};
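
/*
 * For reference (a sketch, not text from the original header): the
 * template type the initializers above fill in is declared elsewhere --
 * in kernels of this era, in <linux/raid/xor.h> -- and looks roughly
 * like the following; field names other than do_2..do_5 are recalled
 * from memory and should be treated as assumptions:
 *
 *	struct xor_block_template {
 *		struct xor_block_template *next;
 *		const char *name;
 *		int speed;
 *		void (*do_2)(unsigned long, unsigned long *, unsigned long *);
 *		void (*do_3)(unsigned long, unsigned long *, unsigned long *,
 *			     unsigned long *);
 *		void (*do_4)(unsigned long, unsigned long *, unsigned long *,
 *			     unsigned long *, unsigned long *);
 *		void (*do_5)(unsigned long, unsigned long *, unsigned long *,
 *			     unsigned long *, unsigned long *, unsigned long *);
 *	};
 *
 * The calibration code below walks these templates via xor_speed().
 */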
/*
 * Cache avoiding checksumming functions utilizing KNI instructions
 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
 */
#define XMMS_SAVE do {				\
	__asm__ __volatile__ (			\
		"movups %%xmm0,(%0)	;\n\t"	\
		"movups %%xmm1,0x10(%0)	;\n\t"	\
		"movups %%xmm2,0x20(%0)	;\n\t"	\
		"movups %%xmm3,0x30(%0)	;\n\t"	\
#define XMMS_RESTORE do {			\
	__asm__ __volatile__ (			\
		"movups (%0),%%xmm0	;\n\t"	\
		"movups 0x10(%0),%%xmm1	;\n\t"	\
		"movups 0x20(%0),%%xmm2	;\n\t"	\
		"movups 0x30(%0),%%xmm3	;\n\t"	\
#define ALIGN16 __attribute__((aligned(16)))
#undef LD
#undef ST
#undef XO1
#undef XO2
#undef XO3
#undef XO4

#define OFFS(x)    "16*("#x")"
#define PF_OFFS(x) "256+16*("#x")"
#define PF0(x)     " prefetchnta "PF_OFFS(x)"(%1) ;\n"
#define LD(x,y)    " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
#define ST(x,y)    " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
#define PF1(x)     " prefetchnta "PF_OFFS(x)"(%2) ;\n"
#define PF2(x)     " prefetchnta "PF_OFFS(x)"(%3) ;\n"
#define PF3(x)     " prefetchnta "PF_OFFS(x)"(%4) ;\n"
#define PF4(x)     " prefetchnta "PF_OFFS(x)"(%5) ;\n"
#define PF5(x)     " prefetchnta "PF_OFFS(x)"(%6) ;\n"
#define XO1(x,y)   " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
#define XO2(x,y)   " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
#define XO3(x,y)   " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
#define XO4(x,y)   " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
#define XO5(x,y)   " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
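
/*
 * Illustrative expansion (added for clarity, not compiler output): with
 * the macros above, LD(0,0) becomes
 *
 *	" movaps 16*(0)(%1), %%xmm0 ;\n"
 *
 * and PF2(0) becomes
 *
 *	" prefetchnta 256+16*(0)(%3) ;\n"
 *
 * so each unrolled block XORs 16-byte chunks with xorps while issuing
 * non-temporal prefetches 256 bytes ahead of the current position, which
 * keeps the streamed data out of the caches as far as possible.
 */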
static void
xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long lines = bytes >> 8;
	char xmm_save[16*4] ALIGN16;

	__asm__ __volatile__ (
static void
xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3)
{
	unsigned long lines = bytes >> 8;
	char xmm_save[16*4] ALIGN16;

	__asm__ __volatile__ (
688 "+r" (p1
), "+r"(p2
), "+r"(p3
)
static void
xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4)
{
	unsigned long lines = bytes >> 8;
	char xmm_save[16*4] ALIGN16;

	__asm__ __volatile__ (
756 "+r" (p1
), "+r" (p2
), "+r" (p3
), "+r" (p4
)
static void
xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned long lines = bytes >> 8;
	char xmm_save[16*4] ALIGN16;
	/* Make sure GCC forgets anything it knows about p4 or p5,
	   such that it won't pass to the asm volatile below a
	   register that is shared with any other variable.  That's
	   because we modify p4 and p5 there, but we can't mark them
	   as read/write, otherwise we'd overflow the 10-asm-operands
	   limit of GCC < 3.1.  */
	__asm__ ("" : "+r" (p4), "+r" (p5));
	__asm__ __volatile__ (
839 "+r" (p1
), "+r" (p2
), "+r" (p3
)
	/* p4 and p5 were modified, and now the variables are dead.
	   Clobber them just to be sure nobody does something stupid
	   like assuming they have some legal value.  */
	__asm__ ("" : "=r" (p4), "=r" (p5));
static struct xor_block_template xor_block_pIII_sse = {
/* Also try the generic routines.  */
#include <asm-generic/xor.h>
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES				\
	do {						\
		xor_speed(&xor_block_8regs);		\
		xor_speed(&xor_block_8regs_p);		\
		xor_speed(&xor_block_32regs);		\
		xor_speed(&xor_block_32regs_p);		\
		if (cpu_has_xmm)			\
			xor_speed(&xor_block_pIII_sse);	\
		if (cpu_has_mmx) {			\
			xor_speed(&xor_block_pII_mmx);	\
			xor_speed(&xor_block_p5_mmx);	\
		}					\
	} while (0)
/* We force the use of the SSE xor block because it can write around L2.
   We may also be able to load into the L1 only depending on how the cpu
   deals with a load to a line that is being prefetched.  */
#define XOR_SELECT_TEMPLATE(FASTEST) \
	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
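
/*
 * Usage sketch (added for illustration; the calling code lives in the
 * generic xor/RAID5 calibration path, not in this header): the core
 * benchmarks every registered template via XOR_TRY_TEMPLATES, picks the
 * one with the best measured speed, and then gives the architecture the
 * last word, roughly:
 *
 *	XOR_TRY_TEMPLATES;			// runs xor_speed() on each candidate
 *	fastest = template_with_best_speed();	// hypothetical helper name
 *	#ifdef XOR_SELECT_TEMPLATE
 *	fastest = XOR_SELECT_TEMPLATE(fastest);	// here: prefer SSE when cpu_has_xmm
 *	#endif
 */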