#if !(defined(__GNUC__) && __GNUC__>=2)
# include "../bn_asm.c" /* kind of dirty hack for Sun Studio */
#else
/*
 * x86_64 BIGNUM accelerator version 0.1, December 2002.
 *
 * Implemented by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
 * project.
 *
 * Rights for redistribution and usage in source and binary forms are
 * granted according to the OpenSSL license. Warranty of any kind is
 * disclaimed.
 *
 * Q. Version 0.1? It doesn't sound like Andy; he used to assign real
 *    versions, like 1.0...
 * A. Well, that's because this code is basically a quick-n-dirty
 *    proof-of-concept hack. As you can see, it's implemented with
 *    inline assembler, which means that you're bound to GCC and that
 *    there might be enough room for further improvement.
 *
 * Q. Why inline assembler?
 * A. x86_64 features its own ABI, which I'm not familiar with. That is
 *    why I decided to let the compiler take care of subroutine
 *    prologue/epilogue as well as register allocation. For reference:
 *    Win64 implements a different ABI for AMD64 than Linux does.
 *
 * Q. How much faster does it get?
 * A. 'apps/openssl speed rsa dsa' output with no-asm:
 *
 *                    sign    verify    sign/s verify/s
 *  rsa  512 bits   0.0006s   0.0001s   1683.8  18456.2
 *  rsa 1024 bits   0.0028s   0.0002s    356.0   6407.0
 *  rsa 2048 bits   0.0172s   0.0005s     58.0   1957.8
 *  rsa 4096 bits   0.1155s   0.0018s      8.7    555.6
 *                    sign    verify    sign/s verify/s
 *  dsa  512 bits   0.0005s   0.0006s   2100.8   1768.3
 *  dsa 1024 bits   0.0014s   0.0018s    692.3    559.2
 *  dsa 2048 bits   0.0049s   0.0061s    204.7    165.0
 *
 *    'apps/openssl speed rsa dsa' output with this module:
 *
 *                    sign    verify    sign/s verify/s
 *  rsa  512 bits   0.0004s   0.0000s   2767.1  33297.9
 *  rsa 1024 bits   0.0012s   0.0001s    867.4  14674.7
 *  rsa 2048 bits   0.0061s   0.0002s    164.0   5270.0
 *  rsa 4096 bits   0.0384s   0.0006s     26.1   1650.8
 *                    sign    verify    sign/s verify/s
 *  dsa  512 bits   0.0002s   0.0003s   4442.2   3786.3
 *  dsa 1024 bits   0.0005s   0.0007s   1835.1   1497.4
 *  dsa 2048 bits   0.0016s   0.0020s    620.4    504.6
 *
 *    For reference: the IA-32 assembler implementation performs
 *    very much like 64-bit code compiled with no-asm on the same
 *    machine.
 */
#ifdef _WIN64
#define BN_ULONG unsigned long long
#else
#define BN_ULONG unsigned long
#endif

#undef mul
#undef mul_add
#undef sqr
69 * "m"(a), "+m"(r) is the way to favor DirectPath ยต-code;
70 * "g"(0) let the compiler to decide where does it
71 * want to keep the value of zero;
#define mul_add(r,a,word,carry) do {    \
    register BN_ULONG high,low;         \
    asm ("mulq %3"                      \
        : "=a"(low),"=d"(high)          \
        : "a"(word),"m"(a) : "cc");     \
    asm ("addq %2,%0; adcq %3,%1"       \
        : "+r"(carry),"+d"(high)        \
        : "a"(low),"g"(0) : "cc");      \
    asm ("addq %2,%0; adcq %3,%1"       \
        : "+m"(r),"+d"(high)            \
        : "r"(carry),"g"(0) : "cc");    \
    carry=high;                         \
    } while (0)
#define mul(r,a,word,carry) do {        \
    register BN_ULONG high,low;         \
    asm ("mulq %3"                      \
        : "=a"(low),"=d"(high)          \
        : "a"(word),"g"(a) : "cc");     \
    asm ("addq %2,%0; adcq %3,%1"       \
        : "+r"(carry),"+d"(high)        \
        : "a"(low),"g"(0) : "cc");      \
    (r)=carry, carry=high;              \
    } while (0)
#define sqr(r0,r1,a)                    \
    asm ("mulq %2"                      \
        : "=a"(r0),"=d"(r1)             \
        : "a"(a) : "cc");
BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
    {
    BN_ULONG c1=0;

    if (num <= 0) return(c1);

    while (num&~3)      /* at least four words left: unrolled by 4 */
        {
        mul_add(rp[0],ap[0],w,c1);
        mul_add(rp[1],ap[1],w,c1);
        mul_add(rp[2],ap[2],w,c1);
        mul_add(rp[3],ap[3],w,c1);
        ap+=4; rp+=4; num-=4;
        }
    if (num)            /* up to three residual words */
        {
        mul_add(rp[0],ap[0],w,c1); if (--num==0) return c1;
        mul_add(rp[1],ap[1],w,c1); if (--num==0) return c1;
        mul_add(rp[2],ap[2],w,c1); return c1;
        }

    return(c1);
    }
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
    {
    BN_ULONG c1=0;

    if (num <= 0) return(c1);

    while (num&~3)
        {
        mul(rp[0],ap[0],w,c1);
        mul(rp[1],ap[1],w,c1);
        mul(rp[2],ap[2],w,c1);
        mul(rp[3],ap[3],w,c1);
        ap+=4; rp+=4; num-=4;
        }
    if (num)
        {
        mul(rp[0],ap[0],w,c1); if (--num == 0) return c1;
        mul(rp[1],ap[1],w,c1); if (--num == 0) return c1;
        mul(rp[2],ap[2],w,c1);
        }
    return c1;
    }
void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
    {
    if (n <= 0) return;

    while (n&~3)
        {
        sqr(r[0],r[1],a[0]);
        sqr(r[2],r[3],a[1]);
        sqr(r[4],r[5],a[2]);
        sqr(r[6],r[7],a[3]);
        a+=4; r+=8; n-=4;
        }
    if (n)
        {
        sqr(r[0],r[1],a[0]); if (--n == 0) return;
        sqr(r[2],r[3],a[1]); if (--n == 0) return;
        sqr(r[4],r[5],a[2]);
        }
    }
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
    {
    BN_ULONG ret,waste;

    asm ("divq %4"
        : "=a"(ret),"=d"(waste)
        : "a"(l),"d"(h),"g"(d)
        : "cc");

    return ret;
    }
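
/*
 * For reference (illustrative only, assuming a compiler with __int128):
 * bn_div_words() returns the 64-bit quotient of the 128-bit value h:l
 * divided by d. As with divq itself, the caller must ensure h < d so
 * that the quotient fits in one word; bn_div_words_ref is a
 * hypothetical name.
 */
#if 0
static BN_ULONG bn_div_words_ref(BN_ULONG h, BN_ULONG l, BN_ULONG d)
{
    unsigned __int128 n = ((unsigned __int128)h << 64) | l;

    return (BN_ULONG)(n / d); /* divq raises #DE if h >= d */
}
#endif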
BN_ULONG bn_add_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, int n)
    {
    BN_ULONG ret=0,i=0;

    if (n <= 0) return 0;

    asm (
    "       subq    %2,%2           \n" /* clear i and the carry flag */
    ".p2align 4                     \n"
    "1:     movq    (%4,%2,8),%0    \n"
    "       adcq    (%5,%2,8),%0    \n"
    "       movq    %0,(%3,%2,8)    \n"
    "       leaq    1(%2),%2        \n" /* lea does not disturb carry */
    "       loop    1b              \n"
    "       sbbq    %0,%0           \n" /* ret = carry ? -1 : 0       */
        : "=&a"(ret),"+c"(n),"=&r"(i)
        : "r"(rp),"r"(ap),"r"(bp)
        : "cc"
    );

    return ret&1;
    }
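
/*
 * Portable sketch of the same contract (illustrative only): rp[] gets
 * ap[] + bp[] modulo 2^(64*n) and the final carry is returned. The asm
 * above can keep the carry in CF across iterations because leaq and
 * loop do not modify the flags; bn_add_words_ref is a hypothetical name.
 */
#if 0
static BN_ULONG bn_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
                                 const BN_ULONG *bp, int n)
{
    BN_ULONG carry = 0;
    int i;

    for (i = 0; i < n; i++) {
        BN_ULONG t = ap[i] + carry;   /* may wrap to 0 ...             */
        carry = (t < carry);          /* ... which re-raises the carry */
        t += bp[i];
        carry |= (t < bp[i]);         /* at most one addition wraps    */
        rp[i] = t;
    }
    return carry;
}
#endif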
#ifndef SIMICS
BN_ULONG bn_sub_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, int n)
    {
    BN_ULONG ret=0,i=0;

    if (n <= 0) return 0;

    asm (
    "       subq    %2,%2           \n" /* clear i and the borrow flag */
    ".p2align 4                     \n"
    "1:     movq    (%4,%2,8),%0    \n"
    "       sbbq    (%5,%2,8),%0    \n"
    "       movq    %0,(%3,%2,8)    \n"
    "       leaq    1(%2),%2        \n"
    "       loop    1b              \n"
    "       sbbq    %0,%0           \n"
        : "=&a"(ret),"+c"(n),"=&r"(i)
        : "r"(rp),"r"(ap),"r"(bp)
        : "cc"
    );

    return ret&1;
    }
#else
/* Simics 1.4<7 has buggy sbbq:-( */
#define BN_MASK2 0xffffffffffffffffL
BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
    {
    BN_ULONG t1,t2;
    int c=0;

    if (n <= 0) return((BN_ULONG)0);

    for (;;)
        {
        t1=a[0]; t2=b[0];
        r[0]=(t1-t2-c)&BN_MASK2;
        /* if t1 == t2, the borrow simply propagates unchanged */
        if (t1 != t2) c=(t1 < t2);
        if (--n <= 0) break;

        t1=a[1]; t2=b[1];
        r[1]=(t1-t2-c)&BN_MASK2;
        if (t1 != t2) c=(t1 < t2);
        if (--n <= 0) break;

        t1=a[2]; t2=b[2];
        r[2]=(t1-t2-c)&BN_MASK2;
        if (t1 != t2) c=(t1 < t2);
        if (--n <= 0) break;

        t1=a[3]; t2=b[3];
        r[3]=(t1-t2-c)&BN_MASK2;
        if (t1 != t2) c=(t1 < t2);
        if (--n <= 0) break;

        a+=4; b+=4; r+=4;
        }
    return(c);
    }
#endif
/* mul_add_c(a,b,c0,c1,c2)    -- c+=a*b for the three-word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2)   -- c+=2*a*b for the three-word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2)    -- c+=a[i]^2 for the three-word number c=(c2,c1,c0) */
/* sqr_add_c2(a,i,j,c0,c1,c2) -- c+=2*a[i]*a[j] for the three-word number c=(c2,c1,c0) */
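
/*
 * In plain C with the non-standard __int128 type, mul_add_c amounts to
 * the following (illustrative sketch only; mul_add_c_ref is a
 * hypothetical name):
 */
#if 0
static void mul_add_c_ref(BN_ULONG a, BN_ULONG b,
                          BN_ULONG *c0, BN_ULONG *c1, BN_ULONG *c2)
{
    unsigned __int128 t = (unsigned __int128)a * b;
    BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 64);

    *c0 += lo; hi  += (*c0 < lo);  /* carry out of the low word; since
                                      hi <= 2^64-2, this cannot wrap   */
    *c1 += hi; *c2 += (*c1 < hi);  /* carry out of the middle word     */
}
#endif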
#if 0
/* original macros are kept for reference purposes */
#define mul_add_c(a,b,c0,c1,c2) {       \
    BN_ULONG ta=(a),tb=(b);             \
    t1 = ta * tb;                       \
    t2 = BN_UMULT_HIGH(ta,tb);          \
    c0 += t1; t2 += (c0<t1)?1:0;        \
    c1 += t2; c2 += (c1<t2)?1:0;        \
    }
#define mul_add_c2(a,b,c0,c1,c2) {      \
    BN_ULONG ta=(a),tb=(b),t0;          \
    t1 = BN_UMULT_HIGH(ta,tb);          \
    t0 = ta * tb;                       \
    t2 = t1+t1; c2 += (t2<t1)?1:0;      \
    t1 = t0+t0; t2 += (t1<t0)?1:0;      \
    c0 += t1; t2 += (c0<t1)?1:0;        \
    c1 += t2; c2 += (c1<t2)?1:0;        \
    }
#else
#define mul_add_c(a,b,c0,c1,c2) do {    \
    asm ("mulq %3"                      \
        : "=a"(t1),"=d"(t2)             \
        : "a"(a),"m"(b) : "cc");        \
    asm ("addq %2,%0; adcq %3,%1"       \
        : "+r"(c0),"+d"(t2)             \
        : "a"(t1),"g"(0) : "cc");       \
    asm ("addq %2,%0; adcq %3,%1"       \
        : "+r"(c1),"+r"(c2)             \
        : "d"(t2),"g"(0) : "cc");       \
    } while (0)
#define sqr_add_c(a,i,c0,c1,c2) do {    \
    asm ("mulq %2"                      \
        : "=a"(t1),"=d"(t2)             \
        : "a"((a)[i]) : "cc");          \
    asm ("addq %2,%0; adcq %3,%1"       \
        : "+r"(c0),"+d"(t2)             \
        : "a"(t1),"g"(0) : "cc");       \
    asm ("addq %2,%0; adcq %3,%1"       \
        : "+r"(c1),"+r"(c2)             \
        : "d"(t2),"g"(0) : "cc");       \
    } while (0)
#define mul_add_c2(a,b,c0,c1,c2) do {   \
    asm ("mulq %3"                      \
        : "=a"(t1),"=d"(t2)             \
        : "a"(a),"m"(b) : "cc");        \
    asm ("addq %0,%0; adcq %2,%1"       \
        : "+d"(t2),"+r"(c2)             \
        : "g"(0) : "cc");               \
    asm ("addq %0,%0; adcq %2,%1"       \
        : "+a"(t1),"+d"(t2)             \
        : "g"(0) : "cc");               \
    asm ("addq %2,%0; adcq %3,%1"       \
        : "+r"(c0),"+d"(t2)             \
        : "a"(t1),"g"(0) : "cc");       \
    asm ("addq %2,%0; adcq %3,%1"       \
        : "+r"(c1),"+r"(c2)             \
        : "d"(t2),"g"(0) : "cc");       \
    } while (0)
#endif
#define sqr_add_c2(a,i,j,c0,c1,c2)      \
    mul_add_c2((a)[i],(a)[j],c0,c1,c2)
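
/*
 * The comba routines below are fully unrolled forms of the double loop
 * sketched here: column k collects every product a[i]*b[k-i] into the
 * three-word accumulator, whose lowest word is final once the column is
 * done. Illustrative only; bn_mul_comba_ref is a hypothetical name and
 * this generic form is not used by this file.
 */
#if 0
static void bn_mul_comba_ref(BN_ULONG *r, const BN_ULONG *a,
                             const BN_ULONG *b, int n)
{
    BN_ULONG t1, t2;              /* scratch words used by mul_add_c */
    BN_ULONG c0 = 0, c1 = 0, c2 = 0;
    int i, k;

    for (k = 0; k < 2*n - 1; k++) {
        for (i = (k < n) ? 0 : k - n + 1; i <= k && i < n; i++)
            mul_add_c(a[i], b[k - i], c0, c1, c2);
        r[k] = c0;                /* low word of column k is final   */
        c0 = c1; c1 = c2; c2 = 0; /* shift accumulator down one word */
    }
    r[2*n - 1] = c0;
}
#endif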
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
    {
    BN_ULONG t1,t2;
    BN_ULONG c1,c2,c3;

    c1=0;
    c2=0;
    c3=0;
    mul_add_c(a[0],b[0],c1,c2,c3);
    r[0]=c1;
    c1=0;
    mul_add_c(a[0],b[1],c2,c3,c1);
    mul_add_c(a[1],b[0],c2,c3,c1);
    r[1]=c2;
    c2=0;
    mul_add_c(a[2],b[0],c3,c1,c2);
    mul_add_c(a[1],b[1],c3,c1,c2);
    mul_add_c(a[0],b[2],c3,c1,c2);
    r[2]=c3;
    c3=0;
    mul_add_c(a[0],b[3],c1,c2,c3);
    mul_add_c(a[1],b[2],c1,c2,c3);
    mul_add_c(a[2],b[1],c1,c2,c3);
    mul_add_c(a[3],b[0],c1,c2,c3);
    r[3]=c1;
    c1=0;
    mul_add_c(a[4],b[0],c2,c3,c1);
    mul_add_c(a[3],b[1],c2,c3,c1);
    mul_add_c(a[2],b[2],c2,c3,c1);
    mul_add_c(a[1],b[3],c2,c3,c1);
    mul_add_c(a[0],b[4],c2,c3,c1);
    r[4]=c2;
    c2=0;
    mul_add_c(a[0],b[5],c3,c1,c2);
    mul_add_c(a[1],b[4],c3,c1,c2);
    mul_add_c(a[2],b[3],c3,c1,c2);
    mul_add_c(a[3],b[2],c3,c1,c2);
    mul_add_c(a[4],b[1],c3,c1,c2);
    mul_add_c(a[5],b[0],c3,c1,c2);
    r[5]=c3;
    c3=0;
    mul_add_c(a[6],b[0],c1,c2,c3);
    mul_add_c(a[5],b[1],c1,c2,c3);
    mul_add_c(a[4],b[2],c1,c2,c3);
    mul_add_c(a[3],b[3],c1,c2,c3);
    mul_add_c(a[2],b[4],c1,c2,c3);
    mul_add_c(a[1],b[5],c1,c2,c3);
    mul_add_c(a[0],b[6],c1,c2,c3);
    r[6]=c1;
    c1=0;
    mul_add_c(a[0],b[7],c2,c3,c1);
    mul_add_c(a[1],b[6],c2,c3,c1);
    mul_add_c(a[2],b[5],c2,c3,c1);
    mul_add_c(a[3],b[4],c2,c3,c1);
    mul_add_c(a[4],b[3],c2,c3,c1);
    mul_add_c(a[5],b[2],c2,c3,c1);
    mul_add_c(a[6],b[1],c2,c3,c1);
    mul_add_c(a[7],b[0],c2,c3,c1);
    r[7]=c2;
    c2=0;
    mul_add_c(a[7],b[1],c3,c1,c2);
    mul_add_c(a[6],b[2],c3,c1,c2);
    mul_add_c(a[5],b[3],c3,c1,c2);
    mul_add_c(a[4],b[4],c3,c1,c2);
    mul_add_c(a[3],b[5],c3,c1,c2);
    mul_add_c(a[2],b[6],c3,c1,c2);
    mul_add_c(a[1],b[7],c3,c1,c2);
    r[8]=c3;
    c3=0;
    mul_add_c(a[2],b[7],c1,c2,c3);
    mul_add_c(a[3],b[6],c1,c2,c3);
    mul_add_c(a[4],b[5],c1,c2,c3);
    mul_add_c(a[5],b[4],c1,c2,c3);
    mul_add_c(a[6],b[3],c1,c2,c3);
    mul_add_c(a[7],b[2],c1,c2,c3);
    r[9]=c1;
    c1=0;
    mul_add_c(a[7],b[3],c2,c3,c1);
    mul_add_c(a[6],b[4],c2,c3,c1);
    mul_add_c(a[5],b[5],c2,c3,c1);
    mul_add_c(a[4],b[6],c2,c3,c1);
    mul_add_c(a[3],b[7],c2,c3,c1);
    r[10]=c2;
    c2=0;
    mul_add_c(a[4],b[7],c3,c1,c2);
    mul_add_c(a[5],b[6],c3,c1,c2);
    mul_add_c(a[6],b[5],c3,c1,c2);
    mul_add_c(a[7],b[4],c3,c1,c2);
    r[11]=c3;
    c3=0;
    mul_add_c(a[7],b[5],c1,c2,c3);
    mul_add_c(a[6],b[6],c1,c2,c3);
    mul_add_c(a[5],b[7],c1,c2,c3);
    r[12]=c1;
    c1=0;
    mul_add_c(a[6],b[7],c2,c3,c1);
    mul_add_c(a[7],b[6],c2,c3,c1);
    r[13]=c2;
    c2=0;
    mul_add_c(a[7],b[7],c3,c1,c2);
    r[14]=c3;
    r[15]=c1;
    }
void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
    {
    BN_ULONG t1,t2;
    BN_ULONG c1,c2,c3;

    c1=0;
    c2=0;
    c3=0;
    mul_add_c(a[0],b[0],c1,c2,c3);
    r[0]=c1;
    c1=0;
    mul_add_c(a[0],b[1],c2,c3,c1);
    mul_add_c(a[1],b[0],c2,c3,c1);
    r[1]=c2;
    c2=0;
    mul_add_c(a[2],b[0],c3,c1,c2);
    mul_add_c(a[1],b[1],c3,c1,c2);
    mul_add_c(a[0],b[2],c3,c1,c2);
    r[2]=c3;
    c3=0;
    mul_add_c(a[0],b[3],c1,c2,c3);
    mul_add_c(a[1],b[2],c1,c2,c3);
    mul_add_c(a[2],b[1],c1,c2,c3);
    mul_add_c(a[3],b[0],c1,c2,c3);
    r[3]=c1;
    c1=0;
    mul_add_c(a[3],b[1],c2,c3,c1);
    mul_add_c(a[2],b[2],c2,c3,c1);
    mul_add_c(a[1],b[3],c2,c3,c1);
    r[4]=c2;
    c2=0;
    mul_add_c(a[2],b[3],c3,c1,c2);
    mul_add_c(a[3],b[2],c3,c1,c2);
    r[5]=c3;
    c3=0;
    mul_add_c(a[3],b[3],c1,c2,c3);
    r[6]=c1;
    r[7]=c2;
    }
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
    {
    BN_ULONG t1,t2;
    BN_ULONG c1,c2,c3;

    c1=0;
    c2=0;
    c3=0;
    sqr_add_c(a,0,c1,c2,c3);
    r[0]=c1;
    c1=0;
    sqr_add_c2(a,1,0,c2,c3,c1);
    r[1]=c2;
    c2=0;
    sqr_add_c(a,1,c3,c1,c2);
    sqr_add_c2(a,2,0,c3,c1,c2);
    r[2]=c3;
    c3=0;
    sqr_add_c2(a,3,0,c1,c2,c3);
    sqr_add_c2(a,2,1,c1,c2,c3);
    r[3]=c1;
    c1=0;
    sqr_add_c(a,2,c2,c3,c1);
    sqr_add_c2(a,3,1,c2,c3,c1);
    sqr_add_c2(a,4,0,c2,c3,c1);
    r[4]=c2;
    c2=0;
    sqr_add_c2(a,5,0,c3,c1,c2);
    sqr_add_c2(a,4,1,c3,c1,c2);
    sqr_add_c2(a,3,2,c3,c1,c2);
    r[5]=c3;
    c3=0;
    sqr_add_c(a,3,c1,c2,c3);
    sqr_add_c2(a,4,2,c1,c2,c3);
    sqr_add_c2(a,5,1,c1,c2,c3);
    sqr_add_c2(a,6,0,c1,c2,c3);
    r[6]=c1;
    c1=0;
    sqr_add_c2(a,7,0,c2,c3,c1);
    sqr_add_c2(a,6,1,c2,c3,c1);
    sqr_add_c2(a,5,2,c2,c3,c1);
    sqr_add_c2(a,4,3,c2,c3,c1);
    r[7]=c2;
    c2=0;
    sqr_add_c(a,4,c3,c1,c2);
    sqr_add_c2(a,5,3,c3,c1,c2);
    sqr_add_c2(a,6,2,c3,c1,c2);
    sqr_add_c2(a,7,1,c3,c1,c2);
    r[8]=c3;
    c3=0;
    sqr_add_c2(a,7,2,c1,c2,c3);
    sqr_add_c2(a,6,3,c1,c2,c3);
    sqr_add_c2(a,5,4,c1,c2,c3);
    r[9]=c1;
    c1=0;
    sqr_add_c(a,5,c2,c3,c1);
    sqr_add_c2(a,6,4,c2,c3,c1);
    sqr_add_c2(a,7,3,c2,c3,c1);
    r[10]=c2;
    c2=0;
    sqr_add_c2(a,7,4,c3,c1,c2);
    sqr_add_c2(a,6,5,c3,c1,c2);
    r[11]=c3;
    c3=0;
    sqr_add_c(a,6,c1,c2,c3);
    sqr_add_c2(a,7,5,c1,c2,c3);
    r[12]=c1;
    c1=0;
    sqr_add_c2(a,7,6,c2,c3,c1);
    r[13]=c2;
    c2=0;
    sqr_add_c(a,7,c3,c1,c2);
    r[14]=c3;
    r[15]=c1;
    }
void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
    {
    BN_ULONG t1,t2;
    BN_ULONG c1,c2,c3;

    c1=0;
    c2=0;
    c3=0;
    sqr_add_c(a,0,c1,c2,c3);
    r[0]=c1;
    c1=0;
    sqr_add_c2(a,1,0,c2,c3,c1);
    r[1]=c2;
    c2=0;
    sqr_add_c(a,1,c3,c1,c2);
    sqr_add_c2(a,2,0,c3,c1,c2);
    r[2]=c3;
    c3=0;
    sqr_add_c2(a,3,0,c1,c2,c3);
    sqr_add_c2(a,2,1,c1,c2,c3);
    r[3]=c1;
    c1=0;
    sqr_add_c(a,2,c2,c3,c1);
    sqr_add_c2(a,3,1,c2,c3,c1);
    r[4]=c2;
    c2=0;
    sqr_add_c2(a,3,2,c3,c1,c2);
    r[5]=c3;
    c3=0;
    sqr_add_c(a,3,c1,c2,c3);
    r[6]=c1;
    r[7]=c2;
    }
#endif