# qhasm: int64 caller1

# qhasm: int64 caller2

# qhasm: int64 caller3

# qhasm: int64 caller4

# qhasm: int64 caller5

# qhasm: int64 caller6

# qhasm: int64 caller7

# qhasm: caller caller1

# qhasm: caller caller2

# qhasm: caller caller3

# qhasm: caller caller4

# qhasm: caller caller5

# qhasm: caller caller6

# qhasm: caller caller7

# qhasm: stack64 caller1_stack

# qhasm: stack64 caller2_stack

# qhasm: stack64 caller3_stack

# qhasm: stack64 caller4_stack

# qhasm: stack64 caller5_stack

# qhasm: stack64 caller6_stack

# qhasm: stack64 caller7_stack

# qhasm: stack64 q30_stack

# qhasm: stack64 q31_stack

# qhasm: stack64 q32_stack

# qhasm: stack64 q33_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett
.globl crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett
_crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett:
crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett:
# standard qhasm enter sequence: align the stack to 32 bytes and reserve
# 96 bytes for the eleven stack64 spill slots used below
mov %rsp,%r11
and $31,%r11
add $96,%r11
sub %r11,%rsp
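
# Barrett reduction: computes r = x mod ORDER for a 512-bit input x.
# Arguments follow the SysV ABI: rp = %rdi (32-byte result), xp = %rsi
# (64-byte input x, eight little-endian 64-bit limbs x0..x7).
# ORDER0..ORDER3 hold the Ed25519 group order
# ORDER = 2^252 + 27742317777372353535851937790883648493, and MU0..MU4 hold
# the precomputed reciprocal mu = floor(2^512 / ORDER).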
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)

# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)

# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)

# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)

# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)

# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)

# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: zero ^= zero
# asm 1: xor <zero=int64#4,<zero=int64#4
# asm 2: xor <zero=%rcx,<zero=%rcx
xor %rcx,%rcx

# qhasm: q30 ^= q30
# asm 1: xor <q30=int64#5,<q30=int64#5
# asm 2: xor <q30=%r8,<q30=%r8
xor %r8,%r8

# qhasm: q31 ^= q31
# asm 1: xor <q31=int64#6,<q31=int64#6
# asm 2: xor <q31=%r9,<q31=%r9
xor %r9,%r9

# qhasm: q32 ^= q32
# asm 1: xor <q32=int64#8,<q32=int64#8
# asm 2: xor <q32=%r10,<q32=%r10
xor %r10,%r10

# qhasm: q33 ^= q33
# asm 1: xor <q33=int64#9,<q33=int64#9
# asm 2: xor <q33=%r11,<q33=%r11
xor %r11,%r11
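
# Step 1: q3 = floor(q1 * mu / 2^320), where q1 = floor(x / 2^192) = (x3..x7).
# Product limbs 3 and 4 (q23, q24) are computed only so that their carries
# propagate into limbs 5..8, which form q3 = (q30, q31, q32, q33); for
# x < ORDER^2, q3 fits in these four limbs.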
# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3

# qhasm: q23 = rax
# asm 1: mov <rax=int64#7,>q23=int64#10
# asm 2: mov <rax=%rax,>q23=%r12
mov %rax,%r12

# qhasm: c = rdx
# asm 1: mov <rdx=int64#3,>c=int64#11
# asm 2: mov <rdx=%rdx,>c=%r13
mov %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4

# qhasm: q24 = rax
# asm 1: mov <rax=int64#7,>q24=int64#12
# asm 2: mov <rax=%rax,>q24=%r14
mov %rax,%r14

# qhasm: carry? q24 += c
# asm 1: add <c=int64#11,<q24=int64#12
# asm 2: add <c=%r13,<q24=%r14
add %r13,%r14

# qhasm: q30 += rdx + carry
# asm 1: adc <rdx=int64#3,<q30=int64#5
# asm 2: adc <rdx=%rdx,<q30=%r8
adc %rdx,%r8
# qhasm: rax = *(uint64 *)(xp + 32)
# asm 1: movq 32(<xp=int64#2),>rax=int64#7
# asm 2: movq 32(<xp=%rsi),>rax=%rax
movq 32(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2

# qhasm: carry? q23 += rax
# asm 1: add <rax=int64#7,<q23=int64#10
# asm 2: add <rax=%rax,<q23=%r12
add %rax,%r12

# qhasm: c = 0
# asm 1: mov $0,>c=int64#11
# asm 2: mov $0,>c=%r13
mov $0,%r13

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#11
# asm 2: adc <rdx=%rdx,<c=%r13
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 32)
# asm 1: movq 32(<xp=int64#2),>rax=int64#7
# asm 2: movq 32(<xp=%rsi),>rax=%rax
movq 32(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3

# qhasm: carry? q24 += rax
# asm 1: add <rax=int64#7,<q24=int64#12
# asm 2: add <rax=%rax,<q24=%r14
add %rax,%r14

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q24 += c
# asm 1: add <c=int64#11,<q24=int64#12
# asm 2: add <c=%r13,<q24=%r14
add %r13,%r14

# qhasm: c = 0
# asm 1: mov $0,>c=int64#11
# asm 2: mov $0,>c=%r13
mov $0,%r13

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#11
# asm 2: adc <rdx=%rdx,<c=%r13
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 32)
# asm 1: movq 32(<xp=int64#2),>rax=int64#7
# asm 2: movq 32(<xp=%rsi),>rax=%rax
movq 32(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4

# qhasm: carry? q30 += rax
# asm 1: add <rax=int64#7,<q30=int64#5
# asm 2: add <rax=%rax,<q30=%r8
add %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q30 += c
# asm 1: add <c=int64#11,<q30=int64#5
# asm 2: add <c=%r13,<q30=%r8
add %r13,%r8

# qhasm: q31 += rdx + carry
# asm 1: adc <rdx=int64#3,<q31=int64#6
# asm 2: adc <rdx=%rdx,<q31=%r9
adc %rdx,%r9
# qhasm: rax = *(uint64 *)(xp + 40)
# asm 1: movq 40(<xp=int64#2),>rax=int64#7
# asm 2: movq 40(<xp=%rsi),>rax=%rax
movq 40(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1

# qhasm: carry? q23 += rax
# asm 1: add <rax=int64#7,<q23=int64#10
# asm 2: add <rax=%rax,<q23=%r12
add %rax,%r12

# qhasm: c = 0
# asm 1: mov $0,>c=int64#11
# asm 2: mov $0,>c=%r13
mov $0,%r13

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#11
# asm 2: adc <rdx=%rdx,<c=%r13
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 40)
# asm 1: movq 40(<xp=int64#2),>rax=int64#7
# asm 2: movq 40(<xp=%rsi),>rax=%rax
movq 40(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2

# qhasm: carry? q24 += rax
# asm 1: add <rax=int64#7,<q24=int64#12
# asm 2: add <rax=%rax,<q24=%r14
add %rax,%r14

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q24 += c
# asm 1: add <c=int64#11,<q24=int64#12
# asm 2: add <c=%r13,<q24=%r14
add %r13,%r14

# qhasm: c = 0
# asm 1: mov $0,>c=int64#11
# asm 2: mov $0,>c=%r13
mov $0,%r13

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#11
# asm 2: adc <rdx=%rdx,<c=%r13
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 40)
# asm 1: movq 40(<xp=int64#2),>rax=int64#7
# asm 2: movq 40(<xp=%rsi),>rax=%rax
movq 40(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3

# qhasm: carry? q30 += rax
# asm 1: add <rax=int64#7,<q30=int64#5
# asm 2: add <rax=%rax,<q30=%r8
add %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q30 += c
# asm 1: add <c=int64#11,<q30=int64#5
# asm 2: add <c=%r13,<q30=%r8
add %r13,%r8

# qhasm: c = 0
# asm 1: mov $0,>c=int64#11
# asm 2: mov $0,>c=%r13
mov $0,%r13

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#11
# asm 2: adc <rdx=%rdx,<c=%r13
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 40)
# asm 1: movq 40(<xp=int64#2),>rax=int64#7
# asm 2: movq 40(<xp=%rsi),>rax=%rax
movq 40(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4

# qhasm: carry? q31 += rax
# asm 1: add <rax=int64#7,<q31=int64#6
# asm 2: add <rax=%rax,<q31=%r9
add %rax,%r9

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q31 += c
# asm 1: add <c=int64#11,<q31=int64#6
# asm 2: add <c=%r13,<q31=%r9
add %r13,%r9

# qhasm: q32 += rdx + carry
# asm 1: adc <rdx=int64#3,<q32=int64#8
# asm 2: adc <rdx=%rdx,<q32=%r10
adc %rdx,%r10
# qhasm: rax = *(uint64 *)(xp + 48)
# asm 1: movq 48(<xp=int64#2),>rax=int64#7
# asm 2: movq 48(<xp=%rsi),>rax=%rax
movq 48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU0
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU0

# qhasm: carry? q23 += rax
# asm 1: add <rax=int64#7,<q23=int64#10
# asm 2: add <rax=%rax,<q23=%r12
add %rax,%r12

# qhasm: c = 0
# asm 1: mov $0,>c=int64#10
# asm 2: mov $0,>c=%r12
mov $0,%r12

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#10
# asm 2: adc <rdx=%rdx,<c=%r12
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 48)
# asm 1: movq 48(<xp=int64#2),>rax=int64#7
# asm 2: movq 48(<xp=%rsi),>rax=%rax
movq 48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1

# qhasm: carry? q24 += rax
# asm 1: add <rax=int64#7,<q24=int64#12
# asm 2: add <rax=%rax,<q24=%r14
add %rax,%r14

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q24 += c
# asm 1: add <c=int64#10,<q24=int64#12
# asm 2: add <c=%r12,<q24=%r14
add %r12,%r14

# qhasm: c = 0
# asm 1: mov $0,>c=int64#10
# asm 2: mov $0,>c=%r12
mov $0,%r12

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#10
# asm 2: adc <rdx=%rdx,<c=%r12
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 48)
# asm 1: movq 48(<xp=int64#2),>rax=int64#7
# asm 2: movq 48(<xp=%rsi),>rax=%rax
movq 48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2

# qhasm: carry? q30 += rax
# asm 1: add <rax=int64#7,<q30=int64#5
# asm 2: add <rax=%rax,<q30=%r8
add %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q30 += c
# asm 1: add <c=int64#10,<q30=int64#5
# asm 2: add <c=%r12,<q30=%r8
add %r12,%r8

# qhasm: c = 0
# asm 1: mov $0,>c=int64#10
# asm 2: mov $0,>c=%r12
mov $0,%r12

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#10
# asm 2: adc <rdx=%rdx,<c=%r12
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 48)
# asm 1: movq 48(<xp=int64#2),>rax=int64#7
# asm 2: movq 48(<xp=%rsi),>rax=%rax
movq 48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3

# qhasm: carry? q31 += rax
# asm 1: add <rax=int64#7,<q31=int64#6
# asm 2: add <rax=%rax,<q31=%r9
add %rax,%r9

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q31 += c
# asm 1: add <c=int64#10,<q31=int64#6
# asm 2: add <c=%r12,<q31=%r9
add %r12,%r9

# qhasm: c = 0
# asm 1: mov $0,>c=int64#10
# asm 2: mov $0,>c=%r12
mov $0,%r12

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#10
# asm 2: adc <rdx=%rdx,<c=%r12
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 48)
# asm 1: movq 48(<xp=int64#2),>rax=int64#7
# asm 2: movq 48(<xp=%rsi),>rax=%rax
movq 48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4

# qhasm: carry? q32 += rax
# asm 1: add <rax=int64#7,<q32=int64#8
# asm 2: add <rax=%rax,<q32=%r10
add %rax,%r10

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q32 += c
# asm 1: add <c=int64#10,<q32=int64#8
# asm 2: add <c=%r12,<q32=%r10
add %r12,%r10

# qhasm: q33 += rdx + carry
# asm 1: adc <rdx=int64#3,<q33=int64#9
# asm 2: adc <rdx=%rdx,<q33=%r11
adc %rdx,%r11
# qhasm: rax = *(uint64 *)(xp + 56)
# asm 1: movq 56(<xp=int64#2),>rax=int64#7
# asm 2: movq 56(<xp=%rsi),>rax=%rax
movq 56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU0
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU0

# qhasm: carry? q24 += rax
# asm 1: add <rax=int64#7,<q24=int64#12
# asm 2: add <rax=%rax,<q24=%r14
add %rax,%r14

# qhasm: c = 0
# asm 1: mov $0,>c=int64#10
# asm 2: mov $0,>c=%r12
mov $0,%r12

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#10
# asm 2: adc <rdx=%rdx,<c=%r12
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 56)
# asm 1: movq 56(<xp=int64#2),>rax=int64#7
# asm 2: movq 56(<xp=%rsi),>rax=%rax
movq 56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1

# qhasm: carry? q30 += rax
# asm 1: add <rax=int64#7,<q30=int64#5
# asm 2: add <rax=%rax,<q30=%r8
add %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q30 += c
# asm 1: add <c=int64#10,<q30=int64#5
# asm 2: add <c=%r12,<q30=%r8
add %r12,%r8

# qhasm: c = 0
# asm 1: mov $0,>c=int64#10
# asm 2: mov $0,>c=%r12
mov $0,%r12

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#10
# asm 2: adc <rdx=%rdx,<c=%r12
adc %rdx,%r12

# qhasm: q30_stack = q30
# asm 1: movq <q30=int64#5,>q30_stack=stack64#8
# asm 2: movq <q30=%r8,>q30_stack=56(%rsp)
movq %r8,56(%rsp)

# qhasm: rax = *(uint64 *)(xp + 56)
# asm 1: movq 56(<xp=int64#2),>rax=int64#7
# asm 2: movq 56(<xp=%rsi),>rax=%rax
movq 56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2

# qhasm: carry? q31 += rax
# asm 1: add <rax=int64#7,<q31=int64#6
# asm 2: add <rax=%rax,<q31=%r9
add %rax,%r9

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q31 += c
# asm 1: add <c=int64#10,<q31=int64#6
# asm 2: add <c=%r12,<q31=%r9
add %r12,%r9

# qhasm: c = 0
# asm 1: mov $0,>c=int64#5
# asm 2: mov $0,>c=%r8
mov $0,%r8

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#5
# asm 2: adc <rdx=%rdx,<c=%r8
adc %rdx,%r8

# qhasm: q31_stack = q31
# asm 1: movq <q31=int64#6,>q31_stack=stack64#9
# asm 2: movq <q31=%r9,>q31_stack=64(%rsp)
movq %r9,64(%rsp)

# qhasm: rax = *(uint64 *)(xp + 56)
# asm 1: movq 56(<xp=int64#2),>rax=int64#7
# asm 2: movq 56(<xp=%rsi),>rax=%rax
movq 56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3

# qhasm: carry? q32 += rax
# asm 1: add <rax=int64#7,<q32=int64#8
# asm 2: add <rax=%rax,<q32=%r10
add %rax,%r10

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q32 += c
# asm 1: add <c=int64#5,<q32=int64#8
# asm 2: add <c=%r8,<q32=%r10
add %r8,%r10

# qhasm: c = 0
# asm 1: mov $0,>c=int64#5
# asm 2: mov $0,>c=%r8
mov $0,%r8

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#5
# asm 2: adc <rdx=%rdx,<c=%r8
adc %rdx,%r8

# qhasm: q32_stack = q32
# asm 1: movq <q32=int64#8,>q32_stack=stack64#10
# asm 2: movq <q32=%r10,>q32_stack=72(%rsp)
movq %r10,72(%rsp)

# qhasm: rax = *(uint64 *)(xp + 56)
# asm 1: movq 56(<xp=int64#2),>rax=int64#7
# asm 2: movq 56(<xp=%rsi),>rax=%rax
movq 56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4

# qhasm: carry? q33 += rax
# asm 1: add <rax=int64#7,<q33=int64#9
# asm 2: add <rax=%rax,<q33=%r11
add %rax,%r11

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: q33 += c
# asm 1: add <c=int64#5,<q33=int64#9
# asm 2: add <c=%r8,<q33=%r11
add %r8,%r11

# qhasm: q33_stack = q33
# asm 1: movq <q33=int64#9,>q33_stack=stack64#11
# asm 2: movq <q33=%r11,>q33_stack=80(%rsp)
movq %r11,80(%rsp)
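
# Step 2: r2 = (q3 * ORDER) mod 2^256. Only the low four product limbs
# r20..r23 are needed, so carries out of limb 3 are discarded (plain add
# instead of adc in the r23 column).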
# qhasm: rax = q30_stack
# asm 1: movq <q30_stack=stack64#8,>rax=int64#7
# asm 2: movq <q30_stack=56(%rsp),>rax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0

# qhasm: r20 = rax
# asm 1: mov <rax=int64#7,>r20=int64#5
# asm 2: mov <rax=%rax,>r20=%r8
mov %rax,%r8

# qhasm: c = rdx
# asm 1: mov <rdx=int64#3,>c=int64#6
# asm 2: mov <rdx=%rdx,>c=%r9
mov %rdx,%r9

# qhasm: rax = q30_stack
# asm 1: movq <q30_stack=stack64#8,>rax=int64#7
# asm 2: movq <q30_stack=56(%rsp),>rax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1

# qhasm: r21 = rax
# asm 1: mov <rax=int64#7,>r21=int64#8
# asm 2: mov <rax=%rax,>r21=%r10
mov %rax,%r10

# qhasm: carry? r21 += c
# asm 1: add <c=int64#6,<r21=int64#8
# asm 2: add <c=%r9,<r21=%r10
add %r9,%r10

# qhasm: c = 0
# asm 1: mov $0,>c=int64#6
# asm 2: mov $0,>c=%r9
mov $0,%r9

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#6
# asm 2: adc <rdx=%rdx,<c=%r9
adc %rdx,%r9

# qhasm: rax = q30_stack
# asm 1: movq <q30_stack=stack64#8,>rax=int64#7
# asm 2: movq <q30_stack=56(%rsp),>rax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER2

# qhasm: r22 = rax
# asm 1: mov <rax=int64#7,>r22=int64#9
# asm 2: mov <rax=%rax,>r22=%r11
mov %rax,%r11

# qhasm: carry? r22 += c
# asm 1: add <c=int64#6,<r22=int64#9
# asm 2: add <c=%r9,<r22=%r11
add %r9,%r11

# qhasm: c = 0
# asm 1: mov $0,>c=int64#6
# asm 2: mov $0,>c=%r9
mov $0,%r9

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#6
# asm 2: adc <rdx=%rdx,<c=%r9
adc %rdx,%r9

# qhasm: rax = q30_stack
# asm 1: movq <q30_stack=stack64#8,>rax=int64#7
# asm 2: movq <q30_stack=56(%rsp),>rax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER3

# qhasm: r23 = rax
# asm 1: mov <rax=int64#7,>r23=int64#10
# asm 2: mov <rax=%rax,>r23=%r12
mov %rax,%r12

# qhasm: r23 += c
# asm 1: add <c=int64#6,<r23=int64#10
# asm 2: add <c=%r9,<r23=%r12
add %r9,%r12
# qhasm: rax = q31_stack
# asm 1: movq <q31_stack=stack64#9,>rax=int64#7
# asm 2: movq <q31_stack=64(%rsp),>rax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0

# qhasm: carry? r21 += rax
# asm 1: add <rax=int64#7,<r21=int64#8
# asm 2: add <rax=%rax,<r21=%r10
add %rax,%r10

# qhasm: c = 0
# asm 1: mov $0,>c=int64#6
# asm 2: mov $0,>c=%r9
mov $0,%r9

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#6
# asm 2: adc <rdx=%rdx,<c=%r9
adc %rdx,%r9

# qhasm: rax = q31_stack
# asm 1: movq <q31_stack=stack64#9,>rax=int64#7
# asm 2: movq <q31_stack=64(%rsp),>rax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1

# qhasm: carry? r22 += rax
# asm 1: add <rax=int64#7,<r22=int64#9
# asm 2: add <rax=%rax,<r22=%r11
add %rax,%r11

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? r22 += c
# asm 1: add <c=int64#6,<r22=int64#9
# asm 2: add <c=%r9,<r22=%r11
add %r9,%r11

# qhasm: c = 0
# asm 1: mov $0,>c=int64#4
# asm 2: mov $0,>c=%rcx
mov $0,%rcx

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#4
# asm 2: adc <rdx=%rdx,<c=%rcx
adc %rdx,%rcx

# qhasm: rax = q31_stack
# asm 1: movq <q31_stack=stack64#9,>rax=int64#7
# asm 2: movq <q31_stack=64(%rsp),>rax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER2

# qhasm: r23 += rax
# asm 1: add <rax=int64#7,<r23=int64#10
# asm 2: add <rax=%rax,<r23=%r12
add %rax,%r12

# qhasm: r23 += c
# asm 1: add <c=int64#4,<r23=int64#10
# asm 2: add <c=%rcx,<r23=%r12
add %rcx,%r12
# qhasm: rax = q32_stack
# asm 1: movq <q32_stack=stack64#10,>rax=int64#7
# asm 2: movq <q32_stack=72(%rsp),>rax=%rax
movq 72(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0

# qhasm: carry? r22 += rax
# asm 1: add <rax=int64#7,<r22=int64#9
# asm 2: add <rax=%rax,<r22=%r11
add %rax,%r11

# qhasm: c = 0
# asm 1: mov $0,>c=int64#4
# asm 2: mov $0,>c=%rcx
mov $0,%rcx

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#4
# asm 2: adc <rdx=%rdx,<c=%rcx
adc %rdx,%rcx

# qhasm: rax = q32_stack
# asm 1: movq <q32_stack=stack64#10,>rax=int64#7
# asm 2: movq <q32_stack=72(%rsp),>rax=%rax
movq 72(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1

# qhasm: r23 += rax
# asm 1: add <rax=int64#7,<r23=int64#10
# asm 2: add <rax=%rax,<r23=%r12
add %rax,%r12

# qhasm: r23 += c
# asm 1: add <c=int64#4,<r23=int64#10
# asm 2: add <c=%rcx,<r23=%r12
add %rcx,%r12
# qhasm: rax = q33_stack
# asm 1: movq <q33_stack=stack64#11,>rax=int64#7
# asm 2: movq <q33_stack=80(%rsp),>rax=%rax
movq 80(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0

# qhasm: r23 += rax
# asm 1: add <rax=int64#7,<r23=int64#10
# asm 2: add <rax=%rax,<r23=%r12
add %rax,%r12
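
# Step 3: r = (x mod 2^256) - r2. By the Barrett bound, x - q3*ORDER lies in
# [0, 3*ORDER), so r fits in four limbs and at most two further conditional
# subtractions of ORDER are needed.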
# qhasm: r0 = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>r0=int64#3
# asm 2: movq 0(<xp=%rsi),>r0=%rdx
movq 0(%rsi),%rdx

# qhasm: carry? r0 -= r20
# asm 1: sub <r20=int64#5,<r0=int64#3
# asm 2: sub <r20=%r8,<r0=%rdx
sub %r8,%rdx

# qhasm: t0 = r0
# asm 1: mov <r0=int64#3,>t0=int64#4
# asm 2: mov <r0=%rdx,>t0=%rcx
mov %rdx,%rcx

# qhasm: r1 = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>r1=int64#5
# asm 2: movq 8(<xp=%rsi),>r1=%r8
movq 8(%rsi),%r8

# qhasm: carry? r1 -= r21 - carry
# asm 1: sbb <r21=int64#8,<r1=int64#5
# asm 2: sbb <r21=%r10,<r1=%r8
sbb %r10,%r8

# qhasm: t1 = r1
# asm 1: mov <r1=int64#5,>t1=int64#6
# asm 2: mov <r1=%r8,>t1=%r9
mov %r8,%r9

# qhasm: r2 = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>r2=int64#7
# asm 2: movq 16(<xp=%rsi),>r2=%rax
movq 16(%rsi),%rax

# qhasm: carry? r2 -= r22 - carry
# asm 1: sbb <r22=int64#9,<r2=int64#7
# asm 2: sbb <r22=%r11,<r2=%rax
sbb %r11,%rax

# qhasm: t2 = r2
# asm 1: mov <r2=int64#7,>t2=int64#8
# asm 2: mov <r2=%rax,>t2=%r10
mov %rax,%r10

# qhasm: r3 = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>r3=int64#2
# asm 2: movq 24(<xp=%rsi),>r3=%rsi
movq 24(%rsi),%rsi

# qhasm: r3 -= r23 - carry
# asm 1: sbb <r23=int64#10,<r3=int64#2
# asm 2: sbb <r23=%r12,<r3=%rsi
sbb %r12,%rsi

# qhasm: t3 = r3
# asm 1: mov <r3=int64#2,>t3=int64#9
# asm 2: mov <r3=%rsi,>t3=%r11
mov %rsi,%r11
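
# Step 4: first conditional subtraction. t = r - ORDER; cmovae keeps t
# exactly when the subtraction did not borrow, i.e. when r >= ORDER.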
# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
# asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=int64#4
# asm 2: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=%rcx
sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,%rcx

# qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=int64#6
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=%r9
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,%r9

# qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=int64#8
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=%r10
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,%r10

# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=int64#9
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=%r11
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,%r11
# qhasm: r0 = t0 if !unsigned<
# asm 1: cmovae <t0=int64#4,<r0=int64#3
# asm 2: cmovae <t0=%rcx,<r0=%rdx
cmovae %rcx,%rdx

# qhasm: t0 = r0
# asm 1: mov <r0=int64#3,>t0=int64#4
# asm 2: mov <r0=%rdx,>t0=%rcx
mov %rdx,%rcx

# qhasm: r1 = t1 if !unsigned<
# asm 1: cmovae <t1=int64#6,<r1=int64#5
# asm 2: cmovae <t1=%r9,<r1=%r8
cmovae %r9,%r8

# qhasm: t1 = r1
# asm 1: mov <r1=int64#5,>t1=int64#6
# asm 2: mov <r1=%r8,>t1=%r9
mov %r8,%r9

# qhasm: r2 = t2 if !unsigned<
# asm 1: cmovae <t2=int64#8,<r2=int64#7
# asm 2: cmovae <t2=%r10,<r2=%rax
cmovae %r10,%rax

# qhasm: t2 = r2
# asm 1: mov <r2=int64#7,>t2=int64#8
# asm 2: mov <r2=%rax,>t2=%r10
mov %rax,%r10

# qhasm: r3 = t3 if !unsigned<
# asm 1: cmovae <t3=int64#9,<r3=int64#2
# asm 2: cmovae <t3=%r11,<r3=%rsi
cmovae %r11,%rsi

# qhasm: t3 = r3
# asm 1: mov <r3=int64#2,>t3=int64#9
# asm 2: mov <r3=%rsi,>t3=%r11
mov %rsi,%r11
# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
# asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=int64#4
# asm 2: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=%rcx
sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,%rcx

# qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=int64#6
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=%r9
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,%r9

# qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=int64#8
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=%r10
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,%r10

# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=int64#9
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=%r11
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,%r11
# qhasm: r0 = t0 if !unsigned<
# asm 1: cmovae <t0=int64#4,<r0=int64#3
# asm 2: cmovae <t0=%rcx,<r0=%rdx
cmovae %rcx,%rdx

# qhasm: r1 = t1 if !unsigned<
# asm 1: cmovae <t1=int64#6,<r1=int64#5
# asm 2: cmovae <t1=%r9,<r1=%r8
cmovae %r9,%r8

# qhasm: r2 = t2 if !unsigned<
# asm 1: cmovae <t2=int64#8,<r2=int64#7
# asm 2: cmovae <t2=%r10,<r2=%rax
cmovae %r10,%rax

# qhasm: r3 = t3 if !unsigned<
# asm 1: cmovae <t3=int64#9,<r3=int64#2
# asm 2: cmovae <t3=%r11,<r3=%rsi
cmovae %r11,%rsi
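
# r is now fully reduced modulo ORDER; store the four result limbs to rp.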
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#3,0(<rp=int64#1)
# asm 2: movq <r0=%rdx,0(<rp=%rdi)
movq %rdx,0(%rdi)

# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#5,8(<rp=int64#1)
# asm 2: movq <r1=%r8,8(<rp=%rdi)
movq %r8,8(%rdi)

# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#7,16(<rp=int64#1)
# asm 2: movq <r2=%rax,16(<rp=%rdi)
movq %rax,16(%rdi)

# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#2,24(<rp=int64#1)
# asm 2: movq <r3=%rsi,24(<rp=%rdi)
movq %rsi,24(%rdi)
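
# Epilogue: restore the callee-saved registers spilled in the prologue.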
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp