# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
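# ull4_mul: schoolbook multiplication of two 4-limb (4 x 64-bit, little-endian)
# integers.  Per the operand annotations below, rp = %rdi receives the 8-limb
# (512-bit) product, while xp = %rsi and yp = %rdx point to the two 4-limb inputs.
# Conceptually: for i in 0..3, for j in 0..3: r[i+j] += x[i]*y[j], with carries
# propagated through the temporary c and the carry flag.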
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
.globl crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
_crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul:
crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)

# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)

# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)

# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)

# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)

# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)

# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
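# The seven registers declared as caller1..caller7 (%r11-%r15, %rbx, %rbp) are
# now spilled to 0(%rsp)..48(%rsp); they are reloaded from the same slots
# before the function returns.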
# qhasm: yp = yp
# asm 1: mov <yp=int64#3,>yp=int64#4
# asm 2: mov <yp=%rdx,>yp=%rcx
mov %rdx,%rcx

# qhasm: r4 = 0
# asm 1: mov $0,>r4=int64#5
# asm 2: mov $0,>r4=%r8
mov $0,%r8

# qhasm: r5 = 0
# asm 1: mov $0,>r5=int64#6
# asm 2: mov $0,>r5=%r9
mov $0,%r9

# qhasm: r6 = 0
# asm 1: mov $0,>r6=int64#8
# asm 2: mov $0,>r6=%r10
mov $0,%r10

# qhasm: r7 = 0
# asm 1: mov $0,>r7=int64#9
# asm 2: mov $0,>r7=%r11
mov $0,%r11

# qhasm: zero = 0
# asm 1: mov $0,>zero=int64#10
# asm 2: mov $0,>zero=%r12
mov $0,%r12
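# yp is moved out of %rdx because mulq overwrites %rdx:%rax with each 128-bit
# product.  The upper result limbs r4..r7 start at zero (they are only ever
# accumulated into), and %r12 holds a constant 0 used to fold carry flags into
# %rdx via adc.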
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)

# qhasm: r0 = rax
# asm 1: mov <rax=int64#7,>r0=int64#11
# asm 2: mov <rax=%rax,>r0=%r13
mov %rax,%r13

# qhasm: c = rdx
# asm 1: mov <rdx=int64#3,>c=int64#12
# asm 2: mov <rdx=%rdx,>c=%r14
mov %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)

# qhasm: r1 = rax
# asm 1: mov <rax=int64#7,>r1=int64#13
# asm 2: mov <rax=%rax,>r1=%r15
mov %rax,%r15

# qhasm: carry? r1 += c
# asm 1: add <c=int64#12,<r1=int64#13
# asm 2: add <c=%r14,<r1=%r15
add %r14,%r15

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
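# Carry-chain idiom: after each mulq, the low half (%rax) is added into the
# current result limb, and the high half (%rdx) plus the resulting carry is
# collected in a freshly zeroed c, which is then added into the next limb.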
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)

# qhasm: r2 = rax
# asm 1: mov <rax=int64#7,>r2=int64#14
# asm 2: mov <rax=%rax,>r2=%rbx
mov %rax,%rbx

# qhasm: carry? r2 += c
# asm 1: add <c=int64#12,<r2=int64#14
# asm 2: add <c=%r14,<r2=%rbx
add %r14,%rbx

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)

# qhasm: r3 = rax
# asm 1: mov <rax=int64#7,>r3=int64#15
# asm 2: mov <rax=%rax,>r3=%rbp
mov %rax,%rbp

# qhasm: carry? r3 += c
# asm 1: add <c=int64#12,<r3=int64#15
# asm 2: add <c=%r14,<r3=%rbp
add %r14,%rbp

# qhasm: r4 += rdx + carry
# asm 1: adc <rdx=int64#3,<r4=int64#5
# asm 2: adc <rdx=%rdx,<r4=%r8
adc %rdx,%r8
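# End of the x[0] row: r0..r3 now hold x[0]*y[0..3] and the final high half
# has been carried into r4.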
# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)

# qhasm: carry? r1 += rax
# asm 1: add <rax=int64#7,<r1=int64#13
# asm 2: add <rax=%rax,<r1=%r15
add %rax,%r15

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)

# qhasm: carry? r2 += rax
# asm 1: add <rax=int64#7,<r2=int64#14
# asm 2: add <rax=%rax,<r2=%rbx
add %rax,%rbx

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
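# "rdx += zero + carry" folds the carry from the preceding add into the high
# half while it is still in %rdx, using the constant-zero register %r12.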
# qhasm: carry? r2 += c
# asm 1: add <c=int64#12,<r2=int64#14
# asm 2: add <c=%r14,<r2=%rbx
add %r14,%rbx

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)

# qhasm: carry? r3 += rax
# asm 1: add <rax=int64#7,<r3=int64#15
# asm 2: add <rax=%rax,<r3=%rbp
add %rax,%rbp

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r3 += c
# asm 1: add <c=int64#12,<r3=int64#15
# asm 2: add <c=%r14,<r3=%rbp
add %r14,%rbp

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)

# qhasm: carry? r4 += rax
# asm 1: add <rax=int64#7,<r4=int64#5
# asm 2: add <rax=%rax,<r4=%r8
add %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r4 += c
# asm 1: add <c=int64#12,<r4=int64#5
# asm 2: add <c=%r14,<r4=%r8
add %r14,%r8

# qhasm: r5 += rdx + carry
# asm 1: adc <rdx=int64#3,<r5=int64#6
# asm 2: adc <rdx=%rdx,<r5=%r9
adc %rdx,%r9
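# End of the x[1] row: x[1]*y[0..3] has been accumulated into r1..r4, with the
# final carry added into r5.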
# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)

# qhasm: carry? r2 += rax
# asm 1: add <rax=int64#7,<r2=int64#14
# asm 2: add <rax=%rax,<r2=%rbx
add %rax,%rbx

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)

# qhasm: carry? r3 += rax
# asm 1: add <rax=int64#7,<r3=int64#15
# asm 2: add <rax=%rax,<r3=%rbp
add %rax,%rbp

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r3 += c
# asm 1: add <c=int64#12,<r3=int64#15
# asm 2: add <c=%r14,<r3=%rbp
add %r14,%rbp

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)

# qhasm: carry? r4 += rax
# asm 1: add <rax=int64#7,<r4=int64#5
# asm 2: add <rax=%rax,<r4=%r8
add %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r4 += c
# asm 1: add <c=int64#12,<r4=int64#5
# asm 2: add <c=%r14,<r4=%r8
add %r14,%r8

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)

# qhasm: carry? r5 += rax
# asm 1: add <rax=int64#7,<r5=int64#6
# asm 2: add <rax=%rax,<r5=%r9
add %rax,%r9

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r5 += c
# asm 1: add <c=int64#12,<r5=int64#6
# asm 2: add <c=%r14,<r5=%r9
add %r14,%r9

# qhasm: r6 += rdx + carry
# asm 1: adc <rdx=int64#3,<r6=int64#8
# asm 2: adc <rdx=%rdx,<r6=%r10
adc %rdx,%r10
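# End of the x[2] row: x[2]*y[0..3] has been accumulated into r2..r5, with the
# final carry added into r6.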
# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)

# qhasm: carry? r3 += rax
# asm 1: add <rax=int64#7,<r3=int64#15
# asm 2: add <rax=%rax,<r3=%rbp
add %rax,%rbp

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)

# qhasm: carry? r4 += rax
# asm 1: add <rax=int64#7,<r4=int64#5
# asm 2: add <rax=%rax,<r4=%r8
add %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r4 += c
# asm 1: add <c=int64#12,<r4=int64#5
# asm 2: add <c=%r14,<r4=%r8
add %r14,%r8

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)

# qhasm: carry? r5 += rax
# asm 1: add <rax=int64#7,<r5=int64#6
# asm 2: add <rax=%rax,<r5=%r9
add %rax,%r9

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r5 += c
# asm 1: add <c=int64#12,<r5=int64#6
# asm 2: add <c=%r14,<r5=%r9
add %r14,%r9

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)

# qhasm: carry? r6 += rax
# asm 1: add <rax=int64#7,<r6=int64#8
# asm 2: add <rax=%rax,<r6=%r10
add %rax,%r10

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r6 += c
# asm 1: add <c=int64#12,<r6=int64#8
# asm 2: add <c=%r14,<r6=%r10
add %r14,%r10

# qhasm: r7 += rdx + carry
# asm 1: adc <rdx=int64#3,<r7=int64#9
# asm 2: adc <rdx=%rdx,<r7=%r11
adc %rdx,%r11
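# End of the x[3] row: the full 512-bit product now sits in r0..r7
# (%r13, %r15, %rbx, %rbp, %r8, %r9, %r10, %r11).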
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#11,0(<rp=int64#1)
# asm 2: movq <r0=%r13,0(<rp=%rdi)
movq %r13,0(%rdi)

# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#13,8(<rp=int64#1)
# asm 2: movq <r1=%r15,8(<rp=%rdi)
movq %r15,8(%rdi)

# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#14,16(<rp=int64#1)
# asm 2: movq <r2=%rbx,16(<rp=%rdi)
movq %rbx,16(%rdi)

# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#15,24(<rp=int64#1)
# asm 2: movq <r3=%rbp,24(<rp=%rdi)
movq %rbp,24(%rdi)

# qhasm: *(uint64 *)(rp + 32) = r4
# asm 1: movq <r4=int64#5,32(<rp=int64#1)
# asm 2: movq <r4=%r8,32(<rp=%rdi)
movq %r8,32(%rdi)

# qhasm: *(uint64 *)(rp + 40) = r5
# asm 1: movq <r5=int64#6,40(<rp=int64#1)
# asm 2: movq <r5=%r9,40(<rp=%rdi)
movq %r9,40(%rdi)

# qhasm: *(uint64 *)(rp + 48) = r6
# asm 1: movq <r6=int64#8,48(<rp=int64#1)
# asm 2: movq <r6=%r10,48(<rp=%rdi)
movq %r10,48(%rdi)

# qhasm: *(uint64 *)(rp + 56) = r7
# asm 1: movq <r7=int64#9,56(<rp=int64#1)
# asm 2: movq <r7=%r11,56(<rp=%rdi)
movq %r11,56(%rdi)
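# The eight result limbs have been written to rp+0..rp+56; all that remains is
# to reload the spilled caller1..caller7 registers from the stack.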
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp