# some tweaks
# [mkp224o.git] / ed25519 / amd64-51-30k / sc25519_barrett.s
# blob f784ea2ff49838ed896fa4aa1cc39046206c3047
2 # qhasm: int64 rp
4 # qhasm: int64 xp
6 # qhasm: input rp
8 # qhasm: input xp
10 # qhasm: int64 caller1
12 # qhasm: int64 caller2
14 # qhasm: int64 caller3
16 # qhasm: int64 caller4
18 # qhasm: int64 caller5
20 # qhasm: int64 caller6
22 # qhasm: int64 caller7
24 # qhasm: caller caller1
26 # qhasm: caller caller2
28 # qhasm: caller caller3
30 # qhasm: caller caller4
32 # qhasm: caller caller5
34 # qhasm: caller caller6
36 # qhasm: caller caller7
38 # qhasm: stack64 caller1_stack
40 # qhasm: stack64 caller2_stack
42 # qhasm: stack64 caller3_stack
44 # qhasm: stack64 caller4_stack
46 # qhasm: stack64 caller5_stack
48 # qhasm: stack64 caller6_stack
50 # qhasm: stack64 caller7_stack
52 # qhasm: int64 q23
54 # qhasm: int64 q24
56 # qhasm: int64 q30
58 # qhasm: int64 q31
60 # qhasm: int64 q32
62 # qhasm: int64 q33
64 # qhasm: int64 r20
66 # qhasm: int64 r21
68 # qhasm: int64 r22
70 # qhasm: int64 r23
72 # qhasm: int64 r24
74 # qhasm: int64 r0
76 # qhasm: int64 r1
78 # qhasm: int64 r2
80 # qhasm: int64 r3
82 # qhasm: int64 t0
84 # qhasm: int64 t1
86 # qhasm: int64 t2
88 # qhasm: int64 t3
90 # qhasm: int64 rax
92 # qhasm: int64 rdx
94 # qhasm: int64 c
96 # qhasm: int64 zero
98 # qhasm: int64 mask
100 # qhasm: int64 nmask
102 # qhasm: stack64 q30_stack
104 # qhasm: stack64 q31_stack
106 # qhasm: stack64 q32_stack
108 # qhasm: stack64 q33_stack
110 # qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett
111 .text
112 .p2align 5
113 .globl _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett
114 .globl crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett
115 _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett:
116 crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett:
117 mov %rsp,%r11
118 and $31,%r11
119 add $96,%r11
120 sub %r11,%rsp
122 # qhasm: caller1_stack = caller1
123 # asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
124 # asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
125 movq %r11,0(%rsp)
127 # qhasm: caller2_stack = caller2
128 # asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
129 # asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
130 movq %r12,8(%rsp)
132 # qhasm: caller3_stack = caller3
133 # asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
134 # asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
135 movq %r13,16(%rsp)
137 # qhasm: caller4_stack = caller4
138 # asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
139 # asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
140 movq %r14,24(%rsp)
142 # qhasm: caller5_stack = caller5
143 # asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
144 # asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
145 movq %r15,32(%rsp)
147 # qhasm: caller6_stack = caller6
148 # asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
149 # asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
150 movq %rbx,40(%rsp)
152 # qhasm: caller7_stack = caller7
153 # asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
154 # asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
155 movq %rbp,48(%rsp)
157 # qhasm: zero ^= zero
158 # asm 1: xor <zero=int64#4,<zero=int64#4
159 # asm 2: xor <zero=%rcx,<zero=%rcx
160 xor %rcx,%rcx
162 # qhasm: q30 ^= q30
163 # asm 1: xor <q30=int64#5,<q30=int64#5
164 # asm 2: xor <q30=%r8,<q30=%r8
165 xor %r8,%r8
167 # qhasm: q31 ^= q31
168 # asm 1: xor <q31=int64#6,<q31=int64#6
169 # asm 2: xor <q31=%r9,<q31=%r9
170 xor %r9,%r9
172 # qhasm: q32 ^= q32
173 # asm 1: xor <q32=int64#8,<q32=int64#8
174 # asm 2: xor <q32=%r10,<q32=%r10
175 xor %r10,%r10
177 # qhasm: q33 ^= q33
178 # asm 1: xor <q33=int64#9,<q33=int64#9
179 # asm 2: xor <q33=%r11,<q33=%r11
180 xor %r11,%r11
182 # qhasm: rax = *(uint64 *)(xp + 24)
183 # asm 1: movq 24(<xp=int64#2),>rax=int64#7
184 # asm 2: movq 24(<xp=%rsi),>rax=%rax
185 movq 24(%rsi),%rax
187 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
188 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3
190 # qhasm: q23 = rax
191 # asm 1: mov <rax=int64#7,>q23=int64#10
192 # asm 2: mov <rax=%rax,>q23=%r12
193 mov %rax,%r12
195 # qhasm: c = rdx
196 # asm 1: mov <rdx=int64#3,>c=int64#11
197 # asm 2: mov <rdx=%rdx,>c=%r13
198 mov %rdx,%r13
200 # qhasm: rax = *(uint64 *)(xp + 24)
201 # asm 1: movq 24(<xp=int64#2),>rax=int64#7
202 # asm 2: movq 24(<xp=%rsi),>rax=%rax
203 movq 24(%rsi),%rax
205 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
206 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4
208 # qhasm: q24 = rax
209 # asm 1: mov <rax=int64#7,>q24=int64#12
210 # asm 2: mov <rax=%rax,>q24=%r14
211 mov %rax,%r14
213 # qhasm: carry? q24 += c
214 # asm 1: add <c=int64#11,<q24=int64#12
215 # asm 2: add <c=%r13,<q24=%r14
216 add %r13,%r14
218 # qhasm: q30 += rdx + carry
219 # asm 1: adc <rdx=int64#3,<q30=int64#5
220 # asm 2: adc <rdx=%rdx,<q30=%r8
221 adc %rdx,%r8
223 # qhasm: rax = *(uint64 *)(xp + 32)
224 # asm 1: movq 32(<xp=int64#2),>rax=int64#7
225 # asm 2: movq 32(<xp=%rsi),>rax=%rax
226 movq 32(%rsi),%rax
228 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2
229 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2
231 # qhasm: carry? q23 += rax
232 # asm 1: add <rax=int64#7,<q23=int64#10
233 # asm 2: add <rax=%rax,<q23=%r12
234 add %rax,%r12
236 # qhasm: c = 0
237 # asm 1: mov $0,>c=int64#11
238 # asm 2: mov $0,>c=%r13
239 mov $0,%r13
241 # qhasm: c += rdx + carry
242 # asm 1: adc <rdx=int64#3,<c=int64#11
243 # asm 2: adc <rdx=%rdx,<c=%r13
244 adc %rdx,%r13
246 # qhasm: rax = *(uint64 *)(xp + 32)
247 # asm 1: movq 32(<xp=int64#2),>rax=int64#7
248 # asm 2: movq 32(<xp=%rsi),>rax=%rax
249 movq 32(%rsi),%rax
251 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
252 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3
254 # qhasm: carry? q24 += rax
255 # asm 1: add <rax=int64#7,<q24=int64#12
256 # asm 2: add <rax=%rax,<q24=%r14
257 add %rax,%r14
259 # qhasm: rdx += zero + carry
260 # asm 1: adc <zero=int64#4,<rdx=int64#3
261 # asm 2: adc <zero=%rcx,<rdx=%rdx
262 adc %rcx,%rdx
264 # qhasm: carry? q24 += c
265 # asm 1: add <c=int64#11,<q24=int64#12
266 # asm 2: add <c=%r13,<q24=%r14
267 add %r13,%r14
269 # qhasm: c = 0
270 # asm 1: mov $0,>c=int64#11
271 # asm 2: mov $0,>c=%r13
272 mov $0,%r13
274 # qhasm: c += rdx + carry
275 # asm 1: adc <rdx=int64#3,<c=int64#11
276 # asm 2: adc <rdx=%rdx,<c=%r13
277 adc %rdx,%r13
279 # qhasm: rax = *(uint64 *)(xp + 32)
280 # asm 1: movq 32(<xp=int64#2),>rax=int64#7
281 # asm 2: movq 32(<xp=%rsi),>rax=%rax
282 movq 32(%rsi),%rax
284 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
285 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4
287 # qhasm: carry? q30 += rax
288 # asm 1: add <rax=int64#7,<q30=int64#5
289 # asm 2: add <rax=%rax,<q30=%r8
290 add %rax,%r8
292 # qhasm: rdx += zero + carry
293 # asm 1: adc <zero=int64#4,<rdx=int64#3
294 # asm 2: adc <zero=%rcx,<rdx=%rdx
295 adc %rcx,%rdx
297 # qhasm: carry? q30 += c
298 # asm 1: add <c=int64#11,<q30=int64#5
299 # asm 2: add <c=%r13,<q30=%r8
300 add %r13,%r8
302 # qhasm: q31 += rdx + carry
303 # asm 1: adc <rdx=int64#3,<q31=int64#6
304 # asm 2: adc <rdx=%rdx,<q31=%r9
305 adc %rdx,%r9
307 # qhasm: rax = *(uint64 *)(xp + 40)
308 # asm 1: movq 40(<xp=int64#2),>rax=int64#7
309 # asm 2: movq 40(<xp=%rsi),>rax=%rax
310 movq 40(%rsi),%rax
312 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1
313 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1
315 # qhasm: carry? q23 += rax
316 # asm 1: add <rax=int64#7,<q23=int64#10
317 # asm 2: add <rax=%rax,<q23=%r12
318 add %rax,%r12
320 # qhasm: c = 0
321 # asm 1: mov $0,>c=int64#11
322 # asm 2: mov $0,>c=%r13
323 mov $0,%r13
325 # qhasm: c += rdx + carry
326 # asm 1: adc <rdx=int64#3,<c=int64#11
327 # asm 2: adc <rdx=%rdx,<c=%r13
328 adc %rdx,%r13
330 # qhasm: rax = *(uint64 *)(xp + 40)
331 # asm 1: movq 40(<xp=int64#2),>rax=int64#7
332 # asm 2: movq 40(<xp=%rsi),>rax=%rax
333 movq 40(%rsi),%rax
335 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2
336 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2
338 # qhasm: carry? q24 += rax
339 # asm 1: add <rax=int64#7,<q24=int64#12
340 # asm 2: add <rax=%rax,<q24=%r14
341 add %rax,%r14
343 # qhasm: rdx += zero + carry
344 # asm 1: adc <zero=int64#4,<rdx=int64#3
345 # asm 2: adc <zero=%rcx,<rdx=%rdx
346 adc %rcx,%rdx
348 # qhasm: carry? q24 += c
349 # asm 1: add <c=int64#11,<q24=int64#12
350 # asm 2: add <c=%r13,<q24=%r14
351 add %r13,%r14
353 # qhasm: c = 0
354 # asm 1: mov $0,>c=int64#11
355 # asm 2: mov $0,>c=%r13
356 mov $0,%r13
358 # qhasm: c += rdx + carry
359 # asm 1: adc <rdx=int64#3,<c=int64#11
360 # asm 2: adc <rdx=%rdx,<c=%r13
361 adc %rdx,%r13
363 # qhasm: rax = *(uint64 *)(xp + 40)
364 # asm 1: movq 40(<xp=int64#2),>rax=int64#7
365 # asm 2: movq 40(<xp=%rsi),>rax=%rax
366 movq 40(%rsi),%rax
368 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
369 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3
371 # qhasm: carry? q30 += rax
372 # asm 1: add <rax=int64#7,<q30=int64#5
373 # asm 2: add <rax=%rax,<q30=%r8
374 add %rax,%r8
376 # qhasm: rdx += zero + carry
377 # asm 1: adc <zero=int64#4,<rdx=int64#3
378 # asm 2: adc <zero=%rcx,<rdx=%rdx
379 adc %rcx,%rdx
381 # qhasm: carry? q30 += c
382 # asm 1: add <c=int64#11,<q30=int64#5
383 # asm 2: add <c=%r13,<q30=%r8
384 add %r13,%r8
386 # qhasm: c = 0
387 # asm 1: mov $0,>c=int64#11
388 # asm 2: mov $0,>c=%r13
389 mov $0,%r13
391 # qhasm: c += rdx + carry
392 # asm 1: adc <rdx=int64#3,<c=int64#11
393 # asm 2: adc <rdx=%rdx,<c=%r13
394 adc %rdx,%r13
396 # qhasm: rax = *(uint64 *)(xp + 40)
397 # asm 1: movq 40(<xp=int64#2),>rax=int64#7
398 # asm 2: movq 40(<xp=%rsi),>rax=%rax
399 movq 40(%rsi),%rax
401 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
402 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4
404 # qhasm: carry? q31 += rax
405 # asm 1: add <rax=int64#7,<q31=int64#6
406 # asm 2: add <rax=%rax,<q31=%r9
407 add %rax,%r9
409 # qhasm: rdx += zero + carry
410 # asm 1: adc <zero=int64#4,<rdx=int64#3
411 # asm 2: adc <zero=%rcx,<rdx=%rdx
412 adc %rcx,%rdx
414 # qhasm: carry? q31 += c
415 # asm 1: add <c=int64#11,<q31=int64#6
416 # asm 2: add <c=%r13,<q31=%r9
417 add %r13,%r9
419 # qhasm: q32 += rdx + carry
420 # asm 1: adc <rdx=int64#3,<q32=int64#8
421 # asm 2: adc <rdx=%rdx,<q32=%r10
422 adc %rdx,%r10
424 # qhasm: rax = *(uint64 *)(xp + 48)
425 # asm 1: movq 48(<xp=int64#2),>rax=int64#7
426 # asm 2: movq 48(<xp=%rsi),>rax=%rax
427 movq 48(%rsi),%rax
429 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU0
430 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU0
432 # qhasm: carry? q23 += rax
433 # asm 1: add <rax=int64#7,<q23=int64#10
434 # asm 2: add <rax=%rax,<q23=%r12
435 add %rax,%r12
437 # qhasm: c = 0
438 # asm 1: mov $0,>c=int64#10
439 # asm 2: mov $0,>c=%r12
440 mov $0,%r12
442 # qhasm: c += rdx + carry
443 # asm 1: adc <rdx=int64#3,<c=int64#10
444 # asm 2: adc <rdx=%rdx,<c=%r12
445 adc %rdx,%r12
447 # qhasm: rax = *(uint64 *)(xp + 48)
448 # asm 1: movq 48(<xp=int64#2),>rax=int64#7
449 # asm 2: movq 48(<xp=%rsi),>rax=%rax
450 movq 48(%rsi),%rax
452 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1
453 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1
455 # qhasm: carry? q24 += rax
456 # asm 1: add <rax=int64#7,<q24=int64#12
457 # asm 2: add <rax=%rax,<q24=%r14
458 add %rax,%r14
460 # qhasm: rdx += zero + carry
461 # asm 1: adc <zero=int64#4,<rdx=int64#3
462 # asm 2: adc <zero=%rcx,<rdx=%rdx
463 adc %rcx,%rdx
465 # qhasm: carry? q24 += c
466 # asm 1: add <c=int64#10,<q24=int64#12
467 # asm 2: add <c=%r12,<q24=%r14
468 add %r12,%r14
470 # qhasm: c = 0
471 # asm 1: mov $0,>c=int64#10
472 # asm 2: mov $0,>c=%r12
473 mov $0,%r12
475 # qhasm: c += rdx + carry
476 # asm 1: adc <rdx=int64#3,<c=int64#10
477 # asm 2: adc <rdx=%rdx,<c=%r12
478 adc %rdx,%r12
480 # qhasm: rax = *(uint64 *)(xp + 48)
481 # asm 1: movq 48(<xp=int64#2),>rax=int64#7
482 # asm 2: movq 48(<xp=%rsi),>rax=%rax
483 movq 48(%rsi),%rax
485 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2
486 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2
488 # qhasm: carry? q30 += rax
489 # asm 1: add <rax=int64#7,<q30=int64#5
490 # asm 2: add <rax=%rax,<q30=%r8
491 add %rax,%r8
493 # qhasm: rdx += zero + carry
494 # asm 1: adc <zero=int64#4,<rdx=int64#3
495 # asm 2: adc <zero=%rcx,<rdx=%rdx
496 adc %rcx,%rdx
498 # qhasm: carry? q30 += c
499 # asm 1: add <c=int64#10,<q30=int64#5
500 # asm 2: add <c=%r12,<q30=%r8
501 add %r12,%r8
503 # qhasm: c = 0
504 # asm 1: mov $0,>c=int64#10
505 # asm 2: mov $0,>c=%r12
506 mov $0,%r12
508 # qhasm: c += rdx + carry
509 # asm 1: adc <rdx=int64#3,<c=int64#10
510 # asm 2: adc <rdx=%rdx,<c=%r12
511 adc %rdx,%r12
513 # qhasm: rax = *(uint64 *)(xp + 48)
514 # asm 1: movq 48(<xp=int64#2),>rax=int64#7
515 # asm 2: movq 48(<xp=%rsi),>rax=%rax
516 movq 48(%rsi),%rax
518 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
519 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3
521 # qhasm: carry? q31 += rax
522 # asm 1: add <rax=int64#7,<q31=int64#6
523 # asm 2: add <rax=%rax,<q31=%r9
524 add %rax,%r9
526 # qhasm: rdx += zero + carry
527 # asm 1: adc <zero=int64#4,<rdx=int64#3
528 # asm 2: adc <zero=%rcx,<rdx=%rdx
529 adc %rcx,%rdx
531 # qhasm: carry? q31 += c
532 # asm 1: add <c=int64#10,<q31=int64#6
533 # asm 2: add <c=%r12,<q31=%r9
534 add %r12,%r9
536 # qhasm: c = 0
537 # asm 1: mov $0,>c=int64#10
538 # asm 2: mov $0,>c=%r12
539 mov $0,%r12
541 # qhasm: c += rdx + carry
542 # asm 1: adc <rdx=int64#3,<c=int64#10
543 # asm 2: adc <rdx=%rdx,<c=%r12
544 adc %rdx,%r12
546 # qhasm: rax = *(uint64 *)(xp + 48)
547 # asm 1: movq 48(<xp=int64#2),>rax=int64#7
548 # asm 2: movq 48(<xp=%rsi),>rax=%rax
549 movq 48(%rsi),%rax
551 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
552 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4
554 # qhasm: carry? q32 += rax
555 # asm 1: add <rax=int64#7,<q32=int64#8
556 # asm 2: add <rax=%rax,<q32=%r10
557 add %rax,%r10
559 # qhasm: rdx += zero + carry
560 # asm 1: adc <zero=int64#4,<rdx=int64#3
561 # asm 2: adc <zero=%rcx,<rdx=%rdx
562 adc %rcx,%rdx
564 # qhasm: carry? q32 += c
565 # asm 1: add <c=int64#10,<q32=int64#8
566 # asm 2: add <c=%r12,<q32=%r10
567 add %r12,%r10
569 # qhasm: q33 += rdx + carry
570 # asm 1: adc <rdx=int64#3,<q33=int64#9
571 # asm 2: adc <rdx=%rdx,<q33=%r11
572 adc %rdx,%r11
574 # qhasm: rax = *(uint64 *)(xp + 56)
575 # asm 1: movq 56(<xp=int64#2),>rax=int64#7
576 # asm 2: movq 56(<xp=%rsi),>rax=%rax
577 movq 56(%rsi),%rax
579 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU0
580 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU0
582 # qhasm: carry? q24 += rax
583 # asm 1: add <rax=int64#7,<q24=int64#12
584 # asm 2: add <rax=%rax,<q24=%r14
585 add %rax,%r14
587 # qhasm: free q24
589 # qhasm: c = 0
590 # asm 1: mov $0,>c=int64#10
591 # asm 2: mov $0,>c=%r12
592 mov $0,%r12
594 # qhasm: c += rdx + carry
595 # asm 1: adc <rdx=int64#3,<c=int64#10
596 # asm 2: adc <rdx=%rdx,<c=%r12
597 adc %rdx,%r12
599 # qhasm: rax = *(uint64 *)(xp + 56)
600 # asm 1: movq 56(<xp=int64#2),>rax=int64#7
601 # asm 2: movq 56(<xp=%rsi),>rax=%rax
602 movq 56(%rsi),%rax
604 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1
605 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1
607 # qhasm: carry? q30 += rax
608 # asm 1: add <rax=int64#7,<q30=int64#5
609 # asm 2: add <rax=%rax,<q30=%r8
610 add %rax,%r8
612 # qhasm: rdx += zero + carry
613 # asm 1: adc <zero=int64#4,<rdx=int64#3
614 # asm 2: adc <zero=%rcx,<rdx=%rdx
615 adc %rcx,%rdx
617 # qhasm: carry? q30 += c
618 # asm 1: add <c=int64#10,<q30=int64#5
619 # asm 2: add <c=%r12,<q30=%r8
620 add %r12,%r8
622 # qhasm: c = 0
623 # asm 1: mov $0,>c=int64#10
624 # asm 2: mov $0,>c=%r12
625 mov $0,%r12
627 # qhasm: c += rdx + carry
628 # asm 1: adc <rdx=int64#3,<c=int64#10
629 # asm 2: adc <rdx=%rdx,<c=%r12
630 adc %rdx,%r12
632 # qhasm: q30_stack = q30
633 # asm 1: movq <q30=int64#5,>q30_stack=stack64#8
634 # asm 2: movq <q30=%r8,>q30_stack=56(%rsp)
635 movq %r8,56(%rsp)
637 # qhasm: rax = *(uint64 *)(xp + 56)
638 # asm 1: movq 56(<xp=int64#2),>rax=int64#7
639 # asm 2: movq 56(<xp=%rsi),>rax=%rax
640 movq 56(%rsi),%rax
642 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2
643 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2
645 # qhasm: carry? q31 += rax
646 # asm 1: add <rax=int64#7,<q31=int64#6
647 # asm 2: add <rax=%rax,<q31=%r9
648 add %rax,%r9
650 # qhasm: rdx += zero + carry
651 # asm 1: adc <zero=int64#4,<rdx=int64#3
652 # asm 2: adc <zero=%rcx,<rdx=%rdx
653 adc %rcx,%rdx
655 # qhasm: carry? q31 += c
656 # asm 1: add <c=int64#10,<q31=int64#6
657 # asm 2: add <c=%r12,<q31=%r9
658 add %r12,%r9
660 # qhasm: c = 0
661 # asm 1: mov $0,>c=int64#5
662 # asm 2: mov $0,>c=%r8
663 mov $0,%r8
665 # qhasm: c += rdx + carry
666 # asm 1: adc <rdx=int64#3,<c=int64#5
667 # asm 2: adc <rdx=%rdx,<c=%r8
668 adc %rdx,%r8
670 # qhasm: q31_stack = q31
671 # asm 1: movq <q31=int64#6,>q31_stack=stack64#9
672 # asm 2: movq <q31=%r9,>q31_stack=64(%rsp)
673 movq %r9,64(%rsp)
675 # qhasm: rax = *(uint64 *)(xp + 56)
676 # asm 1: movq 56(<xp=int64#2),>rax=int64#7
677 # asm 2: movq 56(<xp=%rsi),>rax=%rax
678 movq 56(%rsi),%rax
680 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3
681 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3
683 # qhasm: carry? q32 += rax
684 # asm 1: add <rax=int64#7,<q32=int64#8
685 # asm 2: add <rax=%rax,<q32=%r10
686 add %rax,%r10
688 # qhasm: rdx += zero + carry
689 # asm 1: adc <zero=int64#4,<rdx=int64#3
690 # asm 2: adc <zero=%rcx,<rdx=%rdx
691 adc %rcx,%rdx
693 # qhasm: carry? q32 += c
694 # asm 1: add <c=int64#5,<q32=int64#8
695 # asm 2: add <c=%r8,<q32=%r10
696 add %r8,%r10
698 # qhasm: c = 0
699 # asm 1: mov $0,>c=int64#5
700 # asm 2: mov $0,>c=%r8
701 mov $0,%r8
703 # qhasm: c += rdx + carry
704 # asm 1: adc <rdx=int64#3,<c=int64#5
705 # asm 2: adc <rdx=%rdx,<c=%r8
706 adc %rdx,%r8
708 # qhasm: q32_stack = q32
709 # asm 1: movq <q32=int64#8,>q32_stack=stack64#10
710 # asm 2: movq <q32=%r10,>q32_stack=72(%rsp)
711 movq %r10,72(%rsp)
713 # qhasm: rax = *(uint64 *)(xp + 56)
714 # asm 1: movq 56(<xp=int64#2),>rax=int64#7
715 # asm 2: movq 56(<xp=%rsi),>rax=%rax
716 movq 56(%rsi),%rax
718 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4
719 mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4
721 # qhasm: carry? q33 += rax
722 # asm 1: add <rax=int64#7,<q33=int64#9
723 # asm 2: add <rax=%rax,<q33=%r11
724 add %rax,%r11
726 # qhasm: rdx += zero + carry
727 # asm 1: adc <zero=int64#4,<rdx=int64#3
728 # asm 2: adc <zero=%rcx,<rdx=%rdx
729 adc %rcx,%rdx
731 # qhasm: q33 += c
732 # asm 1: add <c=int64#5,<q33=int64#9
733 # asm 2: add <c=%r8,<q33=%r11
734 add %r8,%r11
736 # qhasm: q33_stack = q33
737 # asm 1: movq <q33=int64#9,>q33_stack=stack64#11
738 # asm 2: movq <q33=%r11,>q33_stack=80(%rsp)
739 movq %r11,80(%rsp)
741 # qhasm: rax = q30_stack
742 # asm 1: movq <q30_stack=stack64#8,>rax=int64#7
743 # asm 2: movq <q30_stack=56(%rsp),>rax=%rax
744 movq 56(%rsp),%rax
746 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
747 mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
749 # qhasm: r20 = rax
750 # asm 1: mov <rax=int64#7,>r20=int64#5
751 # asm 2: mov <rax=%rax,>r20=%r8
752 mov %rax,%r8
754 # qhasm: c = rdx
755 # asm 1: mov <rdx=int64#3,>c=int64#6
756 # asm 2: mov <rdx=%rdx,>c=%r9
757 mov %rdx,%r9
759 # qhasm: rax = q30_stack
760 # asm 1: movq <q30_stack=stack64#8,>rax=int64#7
761 # asm 2: movq <q30_stack=56(%rsp),>rax=%rax
762 movq 56(%rsp),%rax
764 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
765 mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
767 # qhasm: r21 = rax
768 # asm 1: mov <rax=int64#7,>r21=int64#8
769 # asm 2: mov <rax=%rax,>r21=%r10
770 mov %rax,%r10
772 # qhasm: carry? r21 += c
773 # asm 1: add <c=int64#6,<r21=int64#8
774 # asm 2: add <c=%r9,<r21=%r10
775 add %r9,%r10
777 # qhasm: c = 0
778 # asm 1: mov $0,>c=int64#6
779 # asm 2: mov $0,>c=%r9
780 mov $0,%r9
782 # qhasm: c += rdx + carry
783 # asm 1: adc <rdx=int64#3,<c=int64#6
784 # asm 2: adc <rdx=%rdx,<c=%r9
785 adc %rdx,%r9
787 # qhasm: rax = q30_stack
788 # asm 1: movq <q30_stack=stack64#8,>rax=int64#7
789 # asm 2: movq <q30_stack=56(%rsp),>rax=%rax
790 movq 56(%rsp),%rax
792 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2
793 mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER2
795 # qhasm: r22 = rax
796 # asm 1: mov <rax=int64#7,>r22=int64#9
797 # asm 2: mov <rax=%rax,>r22=%r11
798 mov %rax,%r11
800 # qhasm: carry? r22 += c
801 # asm 1: add <c=int64#6,<r22=int64#9
802 # asm 2: add <c=%r9,<r22=%r11
803 add %r9,%r11
805 # qhasm: c = 0
806 # asm 1: mov $0,>c=int64#6
807 # asm 2: mov $0,>c=%r9
808 mov $0,%r9
810 # qhasm: c += rdx + carry
811 # asm 1: adc <rdx=int64#3,<c=int64#6
812 # asm 2: adc <rdx=%rdx,<c=%r9
813 adc %rdx,%r9
815 # qhasm: rax = q30_stack
816 # asm 1: movq <q30_stack=stack64#8,>rax=int64#7
817 # asm 2: movq <q30_stack=56(%rsp),>rax=%rax
818 movq 56(%rsp),%rax
820 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3
821 mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER3
823 # qhasm: free rdx
825 # qhasm: r23 = rax
826 # asm 1: mov <rax=int64#7,>r23=int64#10
827 # asm 2: mov <rax=%rax,>r23=%r12
828 mov %rax,%r12
830 # qhasm: r23 += c
831 # asm 1: add <c=int64#6,<r23=int64#10
832 # asm 2: add <c=%r9,<r23=%r12
833 add %r9,%r12
835 # qhasm: rax = q31_stack
836 # asm 1: movq <q31_stack=stack64#9,>rax=int64#7
837 # asm 2: movq <q31_stack=64(%rsp),>rax=%rax
838 movq 64(%rsp),%rax
840 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
841 mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
843 # qhasm: carry? r21 += rax
844 # asm 1: add <rax=int64#7,<r21=int64#8
845 # asm 2: add <rax=%rax,<r21=%r10
846 add %rax,%r10
848 # qhasm: c = 0
849 # asm 1: mov $0,>c=int64#6
850 # asm 2: mov $0,>c=%r9
851 mov $0,%r9
853 # qhasm: c += rdx + carry
854 # asm 1: adc <rdx=int64#3,<c=int64#6
855 # asm 2: adc <rdx=%rdx,<c=%r9
856 adc %rdx,%r9
858 # qhasm: rax = q31_stack
859 # asm 1: movq <q31_stack=stack64#9,>rax=int64#7
860 # asm 2: movq <q31_stack=64(%rsp),>rax=%rax
861 movq 64(%rsp),%rax
863 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
864 mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
866 # qhasm: carry? r22 += rax
867 # asm 1: add <rax=int64#7,<r22=int64#9
868 # asm 2: add <rax=%rax,<r22=%r11
869 add %rax,%r11
871 # qhasm: rdx += zero + carry
872 # asm 1: adc <zero=int64#4,<rdx=int64#3
873 # asm 2: adc <zero=%rcx,<rdx=%rdx
874 adc %rcx,%rdx
876 # qhasm: carry? r22 += c
877 # asm 1: add <c=int64#6,<r22=int64#9
878 # asm 2: add <c=%r9,<r22=%r11
879 add %r9,%r11
881 # qhasm: c = 0
882 # asm 1: mov $0,>c=int64#4
883 # asm 2: mov $0,>c=%rcx
884 mov $0,%rcx
886 # qhasm: c += rdx + carry
887 # asm 1: adc <rdx=int64#3,<c=int64#4
888 # asm 2: adc <rdx=%rdx,<c=%rcx
889 adc %rdx,%rcx
891 # qhasm: rax = q31_stack
892 # asm 1: movq <q31_stack=stack64#9,>rax=int64#7
893 # asm 2: movq <q31_stack=64(%rsp),>rax=%rax
894 movq 64(%rsp),%rax
896 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2
897 mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER2
899 # qhasm: free rdx
901 # qhasm: r23 += rax
902 # asm 1: add <rax=int64#7,<r23=int64#10
903 # asm 2: add <rax=%rax,<r23=%r12
904 add %rax,%r12
906 # qhasm: r23 += c
907 # asm 1: add <c=int64#4,<r23=int64#10
908 # asm 2: add <c=%rcx,<r23=%r12
909 add %rcx,%r12
911 # qhasm: rax = q32_stack
912 # asm 1: movq <q32_stack=stack64#10,>rax=int64#7
913 # asm 2: movq <q32_stack=72(%rsp),>rax=%rax
914 movq 72(%rsp),%rax
916 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
917 mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
919 # qhasm: carry? r22 += rax
920 # asm 1: add <rax=int64#7,<r22=int64#9
921 # asm 2: add <rax=%rax,<r22=%r11
922 add %rax,%r11
924 # qhasm: c = 0
925 # asm 1: mov $0,>c=int64#4
926 # asm 2: mov $0,>c=%rcx
927 mov $0,%rcx
929 # qhasm: c += rdx + carry
930 # asm 1: adc <rdx=int64#3,<c=int64#4
931 # asm 2: adc <rdx=%rdx,<c=%rcx
932 adc %rdx,%rcx
934 # qhasm: rax = q32_stack
935 # asm 1: movq <q32_stack=stack64#10,>rax=int64#7
936 # asm 2: movq <q32_stack=72(%rsp),>rax=%rax
937 movq 72(%rsp),%rax
939 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
940 mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
942 # qhasm: free rdx
944 # qhasm: r23 += rax
945 # asm 1: add <rax=int64#7,<r23=int64#10
946 # asm 2: add <rax=%rax,<r23=%r12
947 add %rax,%r12
949 # qhasm: r23 += c
950 # asm 1: add <c=int64#4,<r23=int64#10
951 # asm 2: add <c=%rcx,<r23=%r12
952 add %rcx,%r12
954 # qhasm: rax = q33_stack
955 # asm 1: movq <q33_stack=stack64#11,>rax=int64#7
956 # asm 2: movq <q33_stack=80(%rsp),>rax=%rax
957 movq 80(%rsp),%rax
959 # qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
960 mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
962 # qhasm: free rdx
964 # qhasm: r23 += rax
965 # asm 1: add <rax=int64#7,<r23=int64#10
966 # asm 2: add <rax=%rax,<r23=%r12
967 add %rax,%r12
969 # qhasm: r0 = *(uint64 *)(xp + 0)
970 # asm 1: movq 0(<xp=int64#2),>r0=int64#3
971 # asm 2: movq 0(<xp=%rsi),>r0=%rdx
972 movq 0(%rsi),%rdx
974 # qhasm: carry? r0 -= r20
975 # asm 1: sub <r20=int64#5,<r0=int64#3
976 # asm 2: sub <r20=%r8,<r0=%rdx
977 sub %r8,%rdx
979 # qhasm: t0 = r0
980 # asm 1: mov <r0=int64#3,>t0=int64#4
981 # asm 2: mov <r0=%rdx,>t0=%rcx
982 mov %rdx,%rcx
984 # qhasm: r1 = *(uint64 *)(xp + 8)
985 # asm 1: movq 8(<xp=int64#2),>r1=int64#5
986 # asm 2: movq 8(<xp=%rsi),>r1=%r8
987 movq 8(%rsi),%r8
989 # qhasm: carry? r1 -= r21 - carry
990 # asm 1: sbb <r21=int64#8,<r1=int64#5
991 # asm 2: sbb <r21=%r10,<r1=%r8
992 sbb %r10,%r8
994 # qhasm: t1 = r1
995 # asm 1: mov <r1=int64#5,>t1=int64#6
996 # asm 2: mov <r1=%r8,>t1=%r9
997 mov %r8,%r9
999 # qhasm: r2 = *(uint64 *)(xp + 16)
1000 # asm 1: movq 16(<xp=int64#2),>r2=int64#7
1001 # asm 2: movq 16(<xp=%rsi),>r2=%rax
1002 movq 16(%rsi),%rax
1004 # qhasm: carry? r2 -= r22 - carry
1005 # asm 1: sbb <r22=int64#9,<r2=int64#7
1006 # asm 2: sbb <r22=%r11,<r2=%rax
1007 sbb %r11,%rax
1009 # qhasm: t2 = r2
1010 # asm 1: mov <r2=int64#7,>t2=int64#8
1011 # asm 2: mov <r2=%rax,>t2=%r10
1012 mov %rax,%r10
1014 # qhasm: r3 = *(uint64 *)(xp + 24)
1015 # asm 1: movq 24(<xp=int64#2),>r3=int64#2
1016 # asm 2: movq 24(<xp=%rsi),>r3=%rsi
1017 movq 24(%rsi),%rsi
1019 # qhasm: r3 -= r23 - carry
1020 # asm 1: sbb <r23=int64#10,<r3=int64#2
1021 # asm 2: sbb <r23=%r12,<r3=%rsi
1022 sbb %r12,%rsi
1024 # qhasm: t3 = r3
1025 # asm 1: mov <r3=int64#2,>t3=int64#9
1026 # asm 2: mov <r3=%rsi,>t3=%r11
1027 mov %rsi,%r11
1029 # qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
1030 # asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=int64#4
1031 # asm 2: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=%rcx
1032 sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,%rcx
1034 # qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 - carry
1035 # asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=int64#6
1036 # asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=%r9
1037 sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,%r9
1039 # qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 - carry
1040 # asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=int64#8
1041 # asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=%r10
1042 sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,%r10
1044 # qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 - carry
1045 # asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=int64#9
1046 # asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=%r11
1047 sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,%r11
1049 # qhasm: r0 = t0 if !unsigned<
1050 # asm 1: cmovae <t0=int64#4,<r0=int64#3
1051 # asm 2: cmovae <t0=%rcx,<r0=%rdx
1052 cmovae %rcx,%rdx
1054 # qhasm: t0 = r0
1055 # asm 1: mov <r0=int64#3,>t0=int64#4
1056 # asm 2: mov <r0=%rdx,>t0=%rcx
1057 mov %rdx,%rcx
1059 # qhasm: r1 = t1 if !unsigned<
1060 # asm 1: cmovae <t1=int64#6,<r1=int64#5
1061 # asm 2: cmovae <t1=%r9,<r1=%r8
1062 cmovae %r9,%r8
1064 # qhasm: t1 = r1
1065 # asm 1: mov <r1=int64#5,>t1=int64#6
1066 # asm 2: mov <r1=%r8,>t1=%r9
1067 mov %r8,%r9
1069 # qhasm: r2 = t2 if !unsigned<
1070 # asm 1: cmovae <t2=int64#8,<r2=int64#7
1071 # asm 2: cmovae <t2=%r10,<r2=%rax
1072 cmovae %r10,%rax
1074 # qhasm: t2 = r2
1075 # asm 1: mov <r2=int64#7,>t2=int64#8
1076 # asm 2: mov <r2=%rax,>t2=%r10
1077 mov %rax,%r10
1079 # qhasm: r3 = t3 if !unsigned<
1080 # asm 1: cmovae <t3=int64#9,<r3=int64#2
1081 # asm 2: cmovae <t3=%r11,<r3=%rsi
1082 cmovae %r11,%rsi
1084 # qhasm: t3 = r3
1085 # asm 1: mov <r3=int64#2,>t3=int64#9
1086 # asm 2: mov <r3=%rsi,>t3=%r11
1087 mov %rsi,%r11
1089 # qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
1090 # asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=int64#4
1091 # asm 2: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=%rcx
1092 sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,%rcx
1094 # qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 - carry
1095 # asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=int64#6
1096 # asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=%r9
1097 sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,%r9
1099 # qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 - carry
1100 # asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=int64#8
1101 # asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=%r10
1102 sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,%r10
1104 # qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 - carry
1105 # asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=int64#9
1106 # asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=%r11
1107 sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,%r11
1109 # qhasm: r0 = t0 if !unsigned<
1110 # asm 1: cmovae <t0=int64#4,<r0=int64#3
1111 # asm 2: cmovae <t0=%rcx,<r0=%rdx
1112 cmovae %rcx,%rdx
1114 # qhasm: r1 = t1 if !unsigned<
1115 # asm 1: cmovae <t1=int64#6,<r1=int64#5
1116 # asm 2: cmovae <t1=%r9,<r1=%r8
1117 cmovae %r9,%r8
1119 # qhasm: r2 = t2 if !unsigned<
1120 # asm 1: cmovae <t2=int64#8,<r2=int64#7
1121 # asm 2: cmovae <t2=%r10,<r2=%rax
1122 cmovae %r10,%rax
1124 # qhasm: r3 = t3 if !unsigned<
1125 # asm 1: cmovae <t3=int64#9,<r3=int64#2
1126 # asm 2: cmovae <t3=%r11,<r3=%rsi
1127 cmovae %r11,%rsi
1129 # qhasm: *(uint64 *)(rp + 0) = r0
1130 # asm 1: movq <r0=int64#3,0(<rp=int64#1)
1131 # asm 2: movq <r0=%rdx,0(<rp=%rdi)
1132 movq %rdx,0(%rdi)
1134 # qhasm: *(uint64 *)(rp + 8) = r1
1135 # asm 1: movq <r1=int64#5,8(<rp=int64#1)
1136 # asm 2: movq <r1=%r8,8(<rp=%rdi)
1137 movq %r8,8(%rdi)
1139 # qhasm: *(uint64 *)(rp + 16) = r2
1140 # asm 1: movq <r2=int64#7,16(<rp=int64#1)
1141 # asm 2: movq <r2=%rax,16(<rp=%rdi)
1142 movq %rax,16(%rdi)
1144 # qhasm: *(uint64 *)(rp + 24) = r3
1145 # asm 1: movq <r3=int64#2,24(<rp=int64#1)
1146 # asm 2: movq <r3=%rsi,24(<rp=%rdi)
1147 movq %rsi,24(%rdi)
1149 # qhasm: caller1 = caller1_stack
1150 # asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
1151 # asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
1152 movq 0(%rsp),%r11
1154 # qhasm: caller2 = caller2_stack
1155 # asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
1156 # asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
1157 movq 8(%rsp),%r12
1159 # qhasm: caller3 = caller3_stack
1160 # asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
1161 # asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
1162 movq 16(%rsp),%r13
1164 # qhasm: caller4 = caller4_stack
1165 # asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
1166 # asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
1167 movq 24(%rsp),%r14
1169 # qhasm: caller5 = caller5_stack
1170 # asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
1171 # asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
1172 movq 32(%rsp),%r15
1174 # qhasm: caller6 = caller6_stack
1175 # asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
1176 # asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
1177 movq 40(%rsp),%rbx
1179 # qhasm: caller7 = caller7_stack
1180 # asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
1181 # asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
1182 movq 48(%rsp),%rbp
1184 # qhasm: leave
1185 add %r11,%rsp
1186 mov %rdi,%rax
1187 mov %rsi,%rdx