# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
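
# Note on the qhasm declarations above: "int64" names a 64-bit register
# variable, "caller" marks registers that qhasm treats as belonging to the
# caller and therefore must be preserved, and "stack64" declares the 64-bit
# stack slots used to spill them in the prologue below.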
# qhasm: int64 mulr41
# qhasm: int64 mulrax
# qhasm: int64 mulrdx
# qhasm: int64 mulredmask
# qhasm: stack64 mulx219_stack
# qhasm: stack64 mulx319_stack
# qhasm: stack64 mulx419_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3
.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3
_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3:
crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3:
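
# This routine converts a ge25519 point from P1xP1 to extended P3
# representation with four multiplications in GF(2^255-19):
# r->x = X*T, r->y = Y*Z, r->z = Z*T, r->t = X*Y. The offsets used below are
# consistent with a p1p1 input layout of X at pp+0, Z at pp+40, Y at pp+80,
# T at pp+120, each field element being five 64-bit limbs in radix 2^51:
# f = f0 + 2^51*f1 + 2^102*f2 + 2^153*f3 + 2^204*f4. Per the System V AMD64
# convention, rp (the p3 output) arrives in %rdi and pp (the p1p1 input)
# in %rsi.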
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
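
# First field multiplication: rx = (element at pp+0..32) * (element at
# pp+120..152), i.e. X*T, written to rp+0..32 as the x coordinate of the
# output. a3*19 and a4*19 are precomputed into mulx319_stack/mulx419_stack
# so that the partial products a_i*b_j with i+j >= 5 can be folded directly
# into the low limbs, using 2^255 = 19 (mod 2^255-19).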
# qhasm: mulrax = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 24(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: mulx319_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
# asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
# qhasm: rx0 = mulrax
# asm 1: mov <mulrax=int64#7,>rx0=int64#4
# asm 2: mov <mulrax=%rax,>rx0=%rcx
# qhasm: mulr01 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
# asm 2: mov <mulrdx=%rdx,>mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 32(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: mulx419_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
# asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
# qhasm: carry? rx0 += mulrax
# asm 1: add <mulrax=int64#7,<rx0=int64#4
# asm 2: add <mulrax=%rax,<rx0=%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
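
# Accumulation pattern used throughout: mulq leaves a 128-bit product in
# rdx:rax; the add sums the low half into the limb accumulator and the
# following adc folds the high half plus the carry bit into the companion
# high word, so each pair (mulrN1:rxN) holds one 128-bit column sum.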
# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
# qhasm: carry? rx0 += mulrax
# asm 1: add <mulrax=int64#7,<rx0=int64#4
# asm 2: add <mulrax=%rax,<rx0=%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
# qhasm: rx1 = mulrax
# asm 1: mov <mulrax=int64#7,>rx1=int64#6
# asm 2: mov <mulrax=%rax,>rx1=%r9
# qhasm: mulr11 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
# asm 2: mov <mulrdx=%rdx,>mulr11=%r10
# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
# qhasm: rx2 = mulrax
# asm 1: mov <mulrax=int64#7,>rx2=int64#9
# asm 2: mov <mulrax=%rax,>rx2=%r11
# qhasm: mulr21 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
# asm 2: mov <mulrdx=%rdx,>mulr21=%r12
# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
# qhasm: rx3 = mulrax
# asm 1: mov <mulrax=int64#7,>rx3=int64#11
# asm 2: mov <mulrax=%rax,>rx3=%r13
# qhasm: mulr31 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
# asm 2: mov <mulrdx=%rdx,>mulr31=%r14
# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
# qhasm: rx4 = mulrax
# asm 1: mov <mulrax=int64#7,>rx4=int64#13
# asm 2: mov <mulrax=%rax,>rx4=%r15
# qhasm: mulr41 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
# asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
# qhasm: carry? rx1 += mulrax
# asm 1: add <mulrax=int64#7,<rx1=int64#6
# asm 2: add <mulrax=%rax,<rx1=%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
# qhasm: carry? rx2 += mulrax
# asm 1: add <mulrax=int64#7,<rx2=int64#9
# asm 2: add <mulrax=%rax,<rx2=%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
# qhasm: carry? rx3 += mulrax
# asm 1: add <mulrax=int64#7,<rx3=int64#11
# asm 2: add <mulrax=%rax,<rx3=%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
# qhasm: carry? rx4 += mulrax
# asm 1: add <mulrax=int64#7,<rx4=int64#13
# asm 2: add <mulrax=%rax,<rx4=%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 8(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
# qhasm: carry? rx0 += mulrax
# asm 1: add <mulrax=int64#7,<rx0=int64#4
# asm 2: add <mulrax=%rax,<rx0=%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
# qhasm: carry? rx2 += mulrax
# asm 1: add <mulrax=int64#7,<rx2=int64#9
# asm 2: add <mulrax=%rax,<rx2=%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
# qhasm: carry? rx3 += mulrax
# asm 1: add <mulrax=int64#7,<rx3=int64#11
# asm 2: add <mulrax=%rax,<rx3=%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
# qhasm: carry? rx4 += mulrax
# asm 1: add <mulrax=int64#7,<rx4=int64#13
# asm 2: add <mulrax=%rax,<rx4=%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
# qhasm: carry? rx0 += mulrax
# asm 1: add <mulrax=int64#7,<rx0=int64#4
# asm 2: add <mulrax=%rax,<rx0=%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
# qhasm: carry? rx1 += mulrax
# asm 1: add <mulrax=int64#7,<rx1=int64#6
# asm 2: add <mulrax=%rax,<rx1=%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
# qhasm: mulrax = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
# qhasm: carry? rx3 += mulrax
# asm 1: add <mulrax=int64#7,<rx3=int64#11
# asm 2: add <mulrax=%rax,<rx3=%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
# qhasm: mulrax = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
# qhasm: carry? rx4 += mulrax
# asm 1: add <mulrax=int64#7,<rx4=int64#13
# asm 2: add <mulrax=%rax,<rx4=%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
# qhasm: carry? rx1 += mulrax
# asm 1: add <mulrax=int64#7,<rx1=int64#6
# asm 2: add <mulrax=%rax,<rx1=%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
# qhasm: carry? rx2 += mulrax
# asm 1: add <mulrax=int64#7,<rx2=int64#9
# asm 2: add <mulrax=%rax,<rx2=%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
# qhasm: mulrax = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 32(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
# qhasm: carry? rx4 += mulrax
# asm 1: add <mulrax=int64#7,<rx4=int64#13
# asm 2: add <mulrax=%rax,<rx4=%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
# qhasm: carry? rx1 += mulrax
# asm 1: add <mulrax=int64#7,<rx1=int64#6
# asm 2: add <mulrax=%rax,<rx1=%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
# qhasm: carry? rx2 += mulrax
# asm 1: add <mulrax=int64#7,<rx2=int64#9
# asm 2: add <mulrax=%rax,<rx2=%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
# qhasm: carry? rx3 += mulrax
# asm 1: add <mulrax=int64#7,<rx3=int64#11
# asm 2: add <mulrax=%rax,<rx3=%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
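
# Reduction of the five 128-bit column sums (mulrN1:rN) to 51-bit limbs:
# REDMASK51 is 2^51-1. Each shld $13 shifts the high word left by 13 bits
# while pulling in the top 13 bits of the low word, so mulrN1 ends up holding
# everything above bit 51 of its column; the and keeps bits 0..50 in the limb.
# Each high part is then added into the next-higher limb, and the overflow out
# of limb 4 is multiplied by 19 and folded back into limb 0, again using
# 2^255 = 19 (mod 2^255-19).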
# qhasm: mulr01 = (mulr01.rx0) << 13
# asm 1: shld $13,<rx0=int64#4,<mulr01=int64#5
# asm 2: shld $13,<rx0=%rcx,<mulr01=%r8
# qhasm: rx0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx0=int64#4
# asm 2: and <mulredmask=%rdx,<rx0=%rcx
# qhasm: mulr11 = (mulr11.rx1) << 13
# asm 1: shld $13,<rx1=int64#6,<mulr11=int64#8
# asm 2: shld $13,<rx1=%r9,<mulr11=%r10
# qhasm: rx1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx1=int64#6
# asm 2: and <mulredmask=%rdx,<rx1=%r9
# qhasm: rx1 += mulr01
# asm 1: add <mulr01=int64#5,<rx1=int64#6
# asm 2: add <mulr01=%r8,<rx1=%r9
# qhasm: mulr21 = (mulr21.rx2) << 13
# asm 1: shld $13,<rx2=int64#9,<mulr21=int64#10
# asm 2: shld $13,<rx2=%r11,<mulr21=%r12
# qhasm: rx2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx2=int64#9
# asm 2: and <mulredmask=%rdx,<rx2=%r11
# qhasm: rx2 += mulr11
# asm 1: add <mulr11=int64#8,<rx2=int64#9
# asm 2: add <mulr11=%r10,<rx2=%r11
# qhasm: mulr31 = (mulr31.rx3) << 13
# asm 1: shld $13,<rx3=int64#11,<mulr31=int64#12
# asm 2: shld $13,<rx3=%r13,<mulr31=%r14
# qhasm: rx3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx3=int64#11
# asm 2: and <mulredmask=%rdx,<rx3=%r13
# qhasm: rx3 += mulr21
# asm 1: add <mulr21=int64#10,<rx3=int64#11
# asm 2: add <mulr21=%r12,<rx3=%r13
# qhasm: mulr41 = (mulr41.rx4) << 13
# asm 1: shld $13,<rx4=int64#13,<mulr41=int64#14
# asm 2: shld $13,<rx4=%r15,<mulr41=%rbx
# qhasm: rx4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx4=int64#13
# asm 2: and <mulredmask=%rdx,<rx4=%r15
# qhasm: rx4 += mulr31
# asm 1: add <mulr31=int64#12,<rx4=int64#13
# asm 2: add <mulr31=%r14,<rx4=%r15
# qhasm: mulr41 = mulr41 * 19
# asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
# asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
# qhasm: rx0 += mulr41
# asm 1: add <mulr41=int64#5,<rx0=int64#4
# asm 2: add <mulr41=%r8,<rx0=%rcx
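
# Final carry pass: the excess above 51 bits of each limb is shifted down and
# added into the next limb, and the carry out of limb 4 wraps into limb 0
# with a factor of 19, so every limb ends up with roughly 51 significant bits.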
# qhasm: mult = rx0
# asm 1: mov <rx0=int64#4,>mult=int64#5
# asm 2: mov <rx0=%rcx,>mult=%r8
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
# qhasm: mult += rx1
# asm 1: add <rx1=int64#6,<mult=int64#5
# asm 2: add <rx1=%r9,<mult=%r8
# qhasm: rx1 = mult
# asm 1: mov <mult=int64#5,>rx1=int64#6
# asm 2: mov <mult=%r8,>rx1=%r9
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
# qhasm: rx0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx0=int64#4
# asm 2: and <mulredmask=%rdx,<rx0=%rcx
# qhasm: mult += rx2
# asm 1: add <rx2=int64#9,<mult=int64#5
# asm 2: add <rx2=%r11,<mult=%r8
# qhasm: rx2 = mult
# asm 1: mov <mult=int64#5,>rx2=int64#7
# asm 2: mov <mult=%r8,>rx2=%rax
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
# qhasm: rx1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx1=int64#6
# asm 2: and <mulredmask=%rdx,<rx1=%r9
# qhasm: mult += rx3
# asm 1: add <rx3=int64#11,<mult=int64#5
# asm 2: add <rx3=%r13,<mult=%r8
# qhasm: rx3 = mult
# asm 1: mov <mult=int64#5,>rx3=int64#8
# asm 2: mov <mult=%r8,>rx3=%r10
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
# qhasm: rx2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx2=int64#7
# asm 2: and <mulredmask=%rdx,<rx2=%rax
# qhasm: mult += rx4
# asm 1: add <rx4=int64#13,<mult=int64#5
# asm 2: add <rx4=%r15,<mult=%r8
# qhasm: rx4 = mult
# asm 1: mov <mult=int64#5,>rx4=int64#9
# asm 2: mov <mult=%r8,>rx4=%r11
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
# qhasm: rx3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx3=int64#8
# asm 2: and <mulredmask=%rdx,<rx3=%r10
# qhasm: mult *= 19
# asm 1: imulq $19,<mult=int64#5,>mult=int64#5
# asm 2: imulq $19,<mult=%r8,>mult=%r8
# qhasm: rx0 += mult
# asm 1: add <mult=int64#5,<rx0=int64#4
# asm 2: add <mult=%r8,<rx0=%rcx
# qhasm: rx4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx4=int64#9
# asm 2: and <mulredmask=%rdx,<rx4=%r11
# qhasm: *(uint64 *)(rp + 0) = rx0
# asm 1: movq <rx0=int64#4,0(<rp=int64#1)
# asm 2: movq <rx0=%rcx,0(<rp=%rdi)
# qhasm: *(uint64 *)(rp + 8) = rx1
# asm 1: movq <rx1=int64#6,8(<rp=int64#1)
# asm 2: movq <rx1=%r9,8(<rp=%rdi)
# qhasm: *(uint64 *)(rp + 16) = rx2
# asm 1: movq <rx2=int64#7,16(<rp=int64#1)
# asm 2: movq <rx2=%rax,16(<rp=%rdi)
# qhasm: *(uint64 *)(rp + 24) = rx3
# asm 1: movq <rx3=int64#8,24(<rp=int64#1)
# asm 2: movq <rx3=%r10,24(<rp=%rdi)
# qhasm: *(uint64 *)(rp + 32) = rx4
# asm 1: movq <rx4=int64#9,32(<rp=int64#1)
# asm 2: movq <rx4=%r11,32(<rp=%rdi)
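
# r->x is now stored at rp+0..32. Second field multiplication below:
# ry = (element at pp+80..112) * (element at pp+40..72), i.e. Y*Z under the
# layout above, destined for rp+40..72.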
# qhasm: mulrax = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 104(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: mulx319_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
# asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
# asm 1: mulq 56(<pp=int64#2)
# asm 2: mulq 56(<pp=%rsi)
# qhasm: ry0 = mulrax
# asm 1: mov <mulrax=int64#7,>ry0=int64#4
# asm 2: mov <mulrax=%rax,>ry0=%rcx
# qhasm: mulr01 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
# asm 2: mov <mulrdx=%rdx,>mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 112)
# asm 1: movq 112(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 112(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: mulx419_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
# asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
# asm 1: mulq 48(<pp=int64#2)
# asm 2: mulq 48(<pp=%rsi)
# qhasm: carry? ry0 += mulrax
# asm 1: add <mulrax=int64#7,<ry0=int64#4
# asm 2: add <mulrax=%rax,<ry0=%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 80)
# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 80(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
# asm 1: mulq 40(<pp=int64#2)
# asm 2: mulq 40(<pp=%rsi)
# qhasm: carry? ry0 += mulrax
# asm 1: add <mulrax=int64#7,<ry0=int64#4
# asm 2: add <mulrax=%rax,<ry0=%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 80)
# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 80(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
# asm 1: mulq 48(<pp=int64#2)
# asm 2: mulq 48(<pp=%rsi)
# qhasm: ry1 = mulrax
# asm 1: mov <mulrax=int64#7,>ry1=int64#6
# asm 2: mov <mulrax=%rax,>ry1=%r9
# qhasm: mulr11 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
# asm 2: mov <mulrdx=%rdx,>mulr11=%r10
# qhasm: mulrax = *(uint64 *)(pp + 80)
# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 80(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
# asm 1: mulq 56(<pp=int64#2)
# asm 2: mulq 56(<pp=%rsi)
# qhasm: ry2 = mulrax
# asm 1: mov <mulrax=int64#7,>ry2=int64#9
# asm 2: mov <mulrax=%rax,>ry2=%r11
# qhasm: mulr21 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
# asm 2: mov <mulrdx=%rdx,>mulr21=%r12
# qhasm: mulrax = *(uint64 *)(pp + 80)
# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 80(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
# qhasm: ry3 = mulrax
# asm 1: mov <mulrax=int64#7,>ry3=int64#11
# asm 2: mov <mulrax=%rax,>ry3=%r13
# qhasm: mulr31 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
# asm 2: mov <mulrdx=%rdx,>mulr31=%r14
# qhasm: mulrax = *(uint64 *)(pp + 80)
# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 80(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
# qhasm: ry4 = mulrax
# asm 1: mov <mulrax=int64#7,>ry4=int64#13
# asm 2: mov <mulrax=%rax,>ry4=%r15
# qhasm: mulr41 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
# asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
# qhasm: mulrax = *(uint64 *)(pp + 88)
# asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 88(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
# asm 1: mulq 40(<pp=int64#2)
# asm 2: mulq 40(<pp=%rsi)
# qhasm: carry? ry1 += mulrax
# asm 1: add <mulrax=int64#7,<ry1=int64#6
# asm 2: add <mulrax=%rax,<ry1=%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
# qhasm: mulrax = *(uint64 *)(pp + 88)
# asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 88(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
# asm 1: mulq 48(<pp=int64#2)
# asm 2: mulq 48(<pp=%rsi)
# qhasm: carry? ry2 += mulrax
# asm 1: add <mulrax=int64#7,<ry2=int64#9
# asm 2: add <mulrax=%rax,<ry2=%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
# qhasm: mulrax = *(uint64 *)(pp + 88)
# asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 88(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
# asm 1: mulq 56(<pp=int64#2)
# asm 2: mulq 56(<pp=%rsi)
# qhasm: carry? ry3 += mulrax
# asm 1: add <mulrax=int64#7,<ry3=int64#11
# asm 2: add <mulrax=%rax,<ry3=%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
# qhasm: mulrax = *(uint64 *)(pp + 88)
# asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 88(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
# qhasm: carry? ry4 += mulrax
# asm 1: add <mulrax=int64#7,<ry4=int64#13
# asm 2: add <mulrax=%rax,<ry4=%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
# qhasm: mulrax = *(uint64 *)(pp + 88)
# asm 1: movq 88(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 88(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
# qhasm: carry? ry0 += mulrax
# asm 1: add <mulrax=int64#7,<ry0=int64#4
# asm 2: add <mulrax=%rax,<ry0=%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 96(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
# asm 1: mulq 40(<pp=int64#2)
# asm 2: mulq 40(<pp=%rsi)
# qhasm: carry? ry2 += mulrax
# asm 1: add <mulrax=int64#7,<ry2=int64#9
# asm 2: add <mulrax=%rax,<ry2=%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 96(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
# asm 1: mulq 48(<pp=int64#2)
# asm 2: mulq 48(<pp=%rsi)
# qhasm: carry? ry3 += mulrax
# asm 1: add <mulrax=int64#7,<ry3=int64#11
# asm 2: add <mulrax=%rax,<ry3=%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 96(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
# asm 1: mulq 56(<pp=int64#2)
# asm 2: mulq 56(<pp=%rsi)
# qhasm: carry? ry4 += mulrax
# asm 1: add <mulrax=int64#7,<ry4=int64#13
# asm 2: add <mulrax=%rax,<ry4=%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 96(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
# qhasm: carry? ry0 += mulrax
# asm 1: add <mulrax=int64#7,<ry0=int64#4
# asm 2: add <mulrax=%rax,<ry0=%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 96(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
# qhasm: carry? ry1 += mulrax
# asm 1: add <mulrax=int64#7,<ry1=int64#6
# asm 2: add <mulrax=%rax,<ry1=%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
# qhasm: mulrax = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 104(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
# asm 1: mulq 40(<pp=int64#2)
# asm 2: mulq 40(<pp=%rsi)
# qhasm: carry? ry3 += mulrax
# asm 1: add <mulrax=int64#7,<ry3=int64#11
# asm 2: add <mulrax=%rax,<ry3=%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
# qhasm: mulrax = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 104(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
# asm 1: mulq 48(<pp=int64#2)
# asm 2: mulq 48(<pp=%rsi)
# qhasm: carry? ry4 += mulrax
# asm 1: add <mulrax=int64#7,<ry4=int64#13
# asm 2: add <mulrax=%rax,<ry4=%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
# qhasm: carry? ry1 += mulrax
# asm 1: add <mulrax=int64#7,<ry1=int64#6
# asm 2: add <mulrax=%rax,<ry1=%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
# qhasm: carry? ry2 += mulrax
# asm 1: add <mulrax=int64#7,<ry2=int64#9
# asm 2: add <mulrax=%rax,<ry2=%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
# qhasm: mulrax = *(uint64 *)(pp + 112)
# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 112(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
# asm 1: mulq 40(<pp=int64#2)
# asm 2: mulq 40(<pp=%rsi)
# qhasm: carry? ry4 += mulrax
# asm 1: add <mulrax=int64#7,<ry4=int64#13
# asm 2: add <mulrax=%rax,<ry4=%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
# asm 1: mulq 56(<pp=int64#2)
# asm 2: mulq 56(<pp=%rsi)
# qhasm: carry? ry1 += mulrax
# asm 1: add <mulrax=int64#7,<ry1=int64#6
# asm 2: add <mulrax=%rax,<ry1=%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
# qhasm: carry? ry2 += mulrax
# asm 1: add <mulrax=int64#7,<ry2=int64#9
# asm 2: add <mulrax=%rax,<ry2=%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
# qhasm: carry? ry3 += mulrax
# asm 1: add <mulrax=int64#7,<ry3=int64#11
# asm 2: add <mulrax=%rax,<ry3=%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
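
# The same shld/mask/19-fold reduction and carry chain as for rx follows
# for ry.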
# qhasm: mulr01 = (mulr01.ry0) << 13
# asm 1: shld $13,<ry0=int64#4,<mulr01=int64#5
# asm 2: shld $13,<ry0=%rcx,<mulr01=%r8
# qhasm: ry0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry0=int64#4
# asm 2: and <mulredmask=%rdx,<ry0=%rcx
# qhasm: mulr11 = (mulr11.ry1) << 13
# asm 1: shld $13,<ry1=int64#6,<mulr11=int64#8
# asm 2: shld $13,<ry1=%r9,<mulr11=%r10
# qhasm: ry1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry1=int64#6
# asm 2: and <mulredmask=%rdx,<ry1=%r9
# qhasm: ry1 += mulr01
# asm 1: add <mulr01=int64#5,<ry1=int64#6
# asm 2: add <mulr01=%r8,<ry1=%r9
# qhasm: mulr21 = (mulr21.ry2) << 13
# asm 1: shld $13,<ry2=int64#9,<mulr21=int64#10
# asm 2: shld $13,<ry2=%r11,<mulr21=%r12
# qhasm: ry2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry2=int64#9
# asm 2: and <mulredmask=%rdx,<ry2=%r11
# qhasm: ry2 += mulr11
# asm 1: add <mulr11=int64#8,<ry2=int64#9
# asm 2: add <mulr11=%r10,<ry2=%r11
# qhasm: mulr31 = (mulr31.ry3) << 13
# asm 1: shld $13,<ry3=int64#11,<mulr31=int64#12
# asm 2: shld $13,<ry3=%r13,<mulr31=%r14
# qhasm: ry3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry3=int64#11
# asm 2: and <mulredmask=%rdx,<ry3=%r13
# qhasm: ry3 += mulr21
# asm 1: add <mulr21=int64#10,<ry3=int64#11
# asm 2: add <mulr21=%r12,<ry3=%r13
# qhasm: mulr41 = (mulr41.ry4) << 13
# asm 1: shld $13,<ry4=int64#13,<mulr41=int64#14
# asm 2: shld $13,<ry4=%r15,<mulr41=%rbx
# qhasm: ry4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry4=int64#13
# asm 2: and <mulredmask=%rdx,<ry4=%r15
# qhasm: ry4 += mulr31
# asm 1: add <mulr31=int64#12,<ry4=int64#13
# asm 2: add <mulr31=%r14,<ry4=%r15
# qhasm: mulr41 = mulr41 * 19
# asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
# asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
# qhasm: ry0 += mulr41
# asm 1: add <mulr41=int64#5,<ry0=int64#4
# asm 2: add <mulr41=%r8,<ry0=%rcx
# qhasm: mult = ry0
# asm 1: mov <ry0=int64#4,>mult=int64#5
# asm 2: mov <ry0=%rcx,>mult=%r8
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
# qhasm: mult += ry1
# asm 1: add <ry1=int64#6,<mult=int64#5
# asm 2: add <ry1=%r9,<mult=%r8
# qhasm: ry1 = mult
# asm 1: mov <mult=int64#5,>ry1=int64#6
# asm 2: mov <mult=%r8,>ry1=%r9
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
# qhasm: ry0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry0=int64#4
# asm 2: and <mulredmask=%rdx,<ry0=%rcx
# qhasm: mult += ry2
# asm 1: add <ry2=int64#9,<mult=int64#5
# asm 2: add <ry2=%r11,<mult=%r8
# qhasm: ry2 = mult
# asm 1: mov <mult=int64#5,>ry2=int64#7
# asm 2: mov <mult=%r8,>ry2=%rax
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
# qhasm: ry1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry1=int64#6
# asm 2: and <mulredmask=%rdx,<ry1=%r9
# qhasm: mult += ry3
# asm 1: add <ry3=int64#11,<mult=int64#5
# asm 2: add <ry3=%r13,<mult=%r8
# qhasm: ry3 = mult
# asm 1: mov <mult=int64#5,>ry3=int64#8
# asm 2: mov <mult=%r8,>ry3=%r10
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
# qhasm: ry2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry2=int64#7
# asm 2: and <mulredmask=%rdx,<ry2=%rax
# qhasm: mult += ry4
# asm 1: add <ry4=int64#13,<mult=int64#5
# asm 2: add <ry4=%r15,<mult=%r8
# qhasm: ry4 = mult
# asm 1: mov <mult=int64#5,>ry4=int64#9
# asm 2: mov <mult=%r8,>ry4=%r11
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
# qhasm: ry3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry3=int64#8
# asm 2: and <mulredmask=%rdx,<ry3=%r10
# qhasm: mult *= 19
# asm 1: imulq $19,<mult=int64#5,>mult=int64#5
# asm 2: imulq $19,<mult=%r8,>mult=%r8
# qhasm: ry0 += mult
# asm 1: add <mult=int64#5,<ry0=int64#4
# asm 2: add <mult=%r8,<ry0=%rcx
# qhasm: ry4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry4=int64#9
# asm 2: and <mulredmask=%rdx,<ry4=%r11
# qhasm: *(uint64 *)(rp + 40) = ry0
# asm 1: movq <ry0=int64#4,40(<rp=int64#1)
# asm 2: movq <ry0=%rcx,40(<rp=%rdi)
# qhasm: *(uint64 *)(rp + 48) = ry1
# asm 1: movq <ry1=int64#6,48(<rp=int64#1)
# asm 2: movq <ry1=%r9,48(<rp=%rdi)
# qhasm: *(uint64 *)(rp + 56) = ry2
# asm 1: movq <ry2=int64#7,56(<rp=int64#1)
# asm 2: movq <ry2=%rax,56(<rp=%rdi)
# qhasm: *(uint64 *)(rp + 64) = ry3
# asm 1: movq <ry3=int64#8,64(<rp=int64#1)
# asm 2: movq <ry3=%r10,64(<rp=%rdi)
# qhasm: *(uint64 *)(rp + 72) = ry4
# asm 1: movq <ry4=int64#9,72(<rp=int64#1)
# asm 2: movq <ry4=%r11,72(<rp=%rdi)
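
# r->y is now stored at rp+40..72. Third field multiplication below:
# rz = (element at pp+40..72) * (element at pp+120..152), i.e. Z*T under the
# layout above, destined for rp+80..112.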
# qhasm: mulrax = *(uint64 *)(pp + 64)
# asm 1: movq 64(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 64(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: mulx319_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
# asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
# qhasm: rz0 = mulrax
# asm 1: mov <mulrax=int64#7,>rz0=int64#4
# asm 2: mov <mulrax=%rax,>rz0=%rcx
# qhasm: mulr01 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
# asm 2: mov <mulrdx=%rdx,>mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 72)
# asm 1: movq 72(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 72(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: mulx419_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
# asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
# qhasm: carry? rz0 += mulrax
# asm 1: add <mulrax=int64#7,<rz0=int64#4
# asm 2: add <mulrax=%rax,<rz0=%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 40(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
# qhasm: carry? rz0 += mulrax
# asm 1: add <mulrax=int64#7,<rz0=int64#4
# asm 2: add <mulrax=%rax,<rz0=%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 40(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
# qhasm: rz1 = mulrax
# asm 1: mov <mulrax=int64#7,>rz1=int64#6
# asm 2: mov <mulrax=%rax,>rz1=%r9
# qhasm: mulr11 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
# asm 2: mov <mulrdx=%rdx,>mulr11=%r10
# qhasm: mulrax = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 40(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
# qhasm: rz2 = mulrax
# asm 1: mov <mulrax=int64#7,>rz2=int64#9
# asm 2: mov <mulrax=%rax,>rz2=%r11
# qhasm: mulr21 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
# asm 2: mov <mulrdx=%rdx,>mulr21=%r12
# qhasm: mulrax = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 40(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
# qhasm: rz3 = mulrax
# asm 1: mov <mulrax=int64#7,>rz3=int64#11
# asm 2: mov <mulrax=%rax,>rz3=%r13
# qhasm: mulr31 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
# asm 2: mov <mulrdx=%rdx,>mulr31=%r14
# qhasm: mulrax = *(uint64 *)(pp + 40)
# asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 40(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
# qhasm: rz4 = mulrax
# asm 1: mov <mulrax=int64#7,>rz4=int64#13
# asm 2: mov <mulrax=%rax,>rz4=%r15
# qhasm: mulr41 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
# asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
# qhasm: mulrax = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 48(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
# qhasm: carry? rz1 += mulrax
# asm 1: add <mulrax=int64#7,<rz1=int64#6
# asm 2: add <mulrax=%rax,<rz1=%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
# qhasm: mulrax = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 48(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#9
# asm 2: add <mulrax=%rax,<rz2=%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
# qhasm: mulrax = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 48(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#11
# asm 2: add <mulrax=%rax,<rz3=%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
# qhasm: mulrax = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 48(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
# qhasm: carry? rz4 += mulrax
# asm 1: add <mulrax=int64#7,<rz4=int64#13
# asm 2: add <mulrax=%rax,<rz4=%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
# qhasm: mulrax = *(uint64 *)(pp + 48)
# asm 1: movq 48(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 48(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
# qhasm: carry? rz0 += mulrax
# asm 1: add <mulrax=int64#7,<rz0=int64#4
# asm 2: add <mulrax=%rax,<rz0=%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 56(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#9
# asm 2: add <mulrax=%rax,<rz2=%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 56(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#11
# asm 2: add <mulrax=%rax,<rz3=%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 56(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
# qhasm: carry? rz4 += mulrax
# asm 1: add <mulrax=int64#7,<rz4=int64#13
# asm 2: add <mulrax=%rax,<rz4=%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 56(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
# qhasm: carry? rz0 += mulrax
# asm 1: add <mulrax=int64#7,<rz0=int64#4
# asm 2: add <mulrax=%rax,<rz0=%rcx
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 56(<pp=%rsi),>mulrax=%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
# qhasm: carry? rz1 += mulrax
# asm 1: add <mulrax=int64#7,<rz1=int64#6
# asm 2: add <mulrax=%rax,<rz1=%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
# qhasm: mulrax = *(uint64 *)(pp + 64)
# asm 1: movq 64(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 64(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#11
# asm 2: add <mulrax=%rax,<rz3=%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
# qhasm: mulrax = *(uint64 *)(pp + 64)
# asm 1: movq 64(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 64(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
# qhasm: carry? rz4 += mulrax
# asm 1: add <mulrax=int64#7,<rz4=int64#13
# asm 2: add <mulrax=%rax,<rz4=%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
# qhasm: carry? rz1 += mulrax
# asm 1: add <mulrax=int64#7,<rz1=int64#6
# asm 2: add <mulrax=%rax,<rz1=%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#9
# asm 2: add <mulrax=%rax,<rz2=%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
# qhasm: mulrax = *(uint64 *)(pp + 72)
# asm 1: movq 72(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 72(<pp=%rsi),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
# qhasm: carry? rz4 += mulrax
# asm 1: add <mulrax=int64#7,<rz4=int64#13
# asm 2: add <mulrax=%rax,<rz4=%r15
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
# qhasm: carry? rz1 += mulrax
# asm 1: add <mulrax=int64#7,<rz1=int64#6
# asm 2: add <mulrax=%rax,<rz1=%r9
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#9
# asm 2: add <mulrax=%rax,<rz2=%r11
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#11
# asm 2: add <mulrax=%rax,<rz3=%r13
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
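
# The same reduction and carry chain as for rx and ry follows for rz.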
2203 # qhasm: mulr01 = (mulr01.rz0) << 13
2204 # asm 1: shld $13,<rz0=int64#4,<mulr01=int64#5
2205 # asm 2: shld $13,<rz0=%rcx,<mulr01=%r8
2208 # qhasm: rz0 &= mulredmask
2209 # asm 1: and <mulredmask=int64#3,<rz0=int64#4
2210 # asm 2: and <mulredmask=%rdx,<rz0=%rcx
2213 # qhasm: mulr11 = (mulr11.rz1) << 13
2214 # asm 1: shld $13,<rz1=int64#6,<mulr11=int64#8
2215 # asm 2: shld $13,<rz1=%r9,<mulr11=%r10
2218 # qhasm: rz1 &= mulredmask
2219 # asm 1: and <mulredmask=int64#3,<rz1=int64#6
2220 # asm 2: and <mulredmask=%rdx,<rz1=%r9
2223 # qhasm: rz1 += mulr01
2224 # asm 1: add <mulr01=int64#5,<rz1=int64#6
2225 # asm 2: add <mulr01=%r8,<rz1=%r9
2228 # qhasm: mulr21 = (mulr21.rz2) << 13
2229 # asm 1: shld $13,<rz2=int64#9,<mulr21=int64#10
2230 # asm 2: shld $13,<rz2=%r11,<mulr21=%r12
2233 # qhasm: rz2 &= mulredmask
2234 # asm 1: and <mulredmask=int64#3,<rz2=int64#9
2235 # asm 2: and <mulredmask=%rdx,<rz2=%r11
2238 # qhasm: rz2 += mulr11
2239 # asm 1: add <mulr11=int64#8,<rz2=int64#9
2240 # asm 2: add <mulr11=%r10,<rz2=%r11
2243 # qhasm: mulr31 = (mulr31.rz3) << 13
2244 # asm 1: shld $13,<rz3=int64#11,<mulr31=int64#12
2245 # asm 2: shld $13,<rz3=%r13,<mulr31=%r14
2248 # qhasm: rz3 &= mulredmask
2249 # asm 1: and <mulredmask=int64#3,<rz3=int64#11
2250 # asm 2: and <mulredmask=%rdx,<rz3=%r13
2253 # qhasm: rz3 += mulr21
2254 # asm 1: add <mulr21=int64#10,<rz3=int64#11
2255 # asm 2: add <mulr21=%r12,<rz3=%r13
2258 # qhasm: mulr41 = (mulr41.rz4) << 13
2259 # asm 1: shld $13,<rz4=int64#13,<mulr41=int64#14
2260 # asm 2: shld $13,<rz4=%r15,<mulr41=%rbx
2263 # qhasm: rz4 &= mulredmask
2264 # asm 1: and <mulredmask=int64#3,<rz4=int64#13
2265 # asm 2: and <mulredmask=%rdx,<rz4=%r15
2268 # qhasm: rz4 += mulr31
2269 # asm 1: add <mulr31=int64#12,<rz4=int64#13
2270 # asm 2: add <mulr31=%r14,<rz4=%r15
2273 # qhasm: mulr41 = mulr41 * 19
2274 # asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
2275 # asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
2278 # qhasm: rz0 += mulr41
2279 # asm 1: add <mulr41=int64#5,<rz0=int64#4
2280 # asm 2: add <mulr41=%r8,<rz0=%rcx
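#
# mulr41 held the carry out of the top limb, at weight 2^255; since
# 2^255 = 19 (mod 2^255-19), scaling it by 19 and adding it into rz0
# folds it back in.  (The shift count 13 in the shld instructions is
# 64 - 51, exposing exactly the bits above each 51-bit limb.)  The
# shr/add chain that follows is plain carry propagation, with the
# masking of rz0..rz4 interleaved into it, presumably to shorten
# dependency chains; the last carry out of rz4 re-enters rz0 scaled
# by 19 in the same way.
#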
2283 # qhasm: mult = rz0
2284 # asm 1: mov <rz0=int64#4,>mult=int64#5
2285 # asm 2: mov <rz0=%rcx,>mult=%r8
2288 # qhasm: (uint64) mult >>= 51
2289 # asm 1: shr $51,<mult=int64#5
2290 # asm 2: shr $51,<mult=%r8
2293 # qhasm: mult += rz1
2294 # asm 1: add <rz1=int64#6,<mult=int64#5
2295 # asm 2: add <rz1=%r9,<mult=%r8
2298 # qhasm: rz1 = mult
2299 # asm 1: mov <mult=int64#5,>rz1=int64#6
2300 # asm 2: mov <mult=%r8,>rz1=%r9
2303 # qhasm: (uint64) mult >>= 51
2304 # asm 1: shr $51,<mult=int64#5
2305 # asm 2: shr $51,<mult=%r8
2308 # qhasm: rz0 &= mulredmask
2309 # asm 1: and <mulredmask=int64#3,<rz0=int64#4
2310 # asm 2: and <mulredmask=%rdx,<rz0=%rcx
2313 # qhasm: mult += rz2
2314 # asm 1: add <rz2=int64#9,<mult=int64#5
2315 # asm 2: add <rz2=%r11,<mult=%r8
2318 # qhasm: rz2 = mult
2319 # asm 1: mov <mult=int64#5,>rz2=int64#7
2320 # asm 2: mov <mult=%r8,>rz2=%rax
2323 # qhasm: (uint64) mult >>= 51
2324 # asm 1: shr $51,<mult=int64#5
2325 # asm 2: shr $51,<mult=%r8
2328 # qhasm: rz1 &= mulredmask
2329 # asm 1: and <mulredmask=int64#3,<rz1=int64#6
2330 # asm 2: and <mulredmask=%rdx,<rz1=%r9
2333 # qhasm: mult += rz3
2334 # asm 1: add <rz3=int64#11,<mult=int64#5
2335 # asm 2: add <rz3=%r13,<mult=%r8
2338 # qhasm: rz3 = mult
2339 # asm 1: mov <mult=int64#5,>rz3=int64#8
2340 # asm 2: mov <mult=%r8,>rz3=%r10
2343 # qhasm: (uint64) mult >>= 51
2344 # asm 1: shr $51,<mult=int64#5
2345 # asm 2: shr $51,<mult=%r8
2348 # qhasm: rz2 &= mulredmask
2349 # asm 1: and <mulredmask=int64#3,<rz2=int64#7
2350 # asm 2: and <mulredmask=%rdx,<rz2=%rax
2353 # qhasm: mult += rz4
2354 # asm 1: add <rz4=int64#13,<mult=int64#5
2355 # asm 2: add <rz4=%r15,<mult=%r8
2358 # qhasm: rz4 = mult
2359 # asm 1: mov <mult=int64#5,>rz4=int64#9
2360 # asm 2: mov <mult=%r8,>rz4=%r11
2363 # qhasm: (uint64) mult >>= 51
2364 # asm 1: shr $51,<mult=int64#5
2365 # asm 2: shr $51,<mult=%r8
2368 # qhasm: rz3 &= mulredmask
2369 # asm 1: and <mulredmask=int64#3,<rz3=int64#8
2370 # asm 2: and <mulredmask=%rdx,<rz3=%r10
2373 # qhasm: mult *= 19
2374 # asm 1: imulq $19,<mult=int64#5,>mult=int64#5
2375 # asm 2: imulq $19,<mult=%r8,>mult=%r8
2378 # qhasm: rz0 += mult
2379 # asm 1: add <mult=int64#5,<rz0=int64#4
2380 # asm 2: add <mult=%r8,<rz0=%rcx
2383 # qhasm: rz4 &= mulredmask
2384 # asm 1: and <mulredmask=int64#3,<rz4=int64#9
2385 # asm 2: and <mulredmask=%rdx,<rz4=%r11
2388 # qhasm: *(uint64 *)(rp + 80) = rz0
2389 # asm 1: movq <rz0=int64#4,80(<rp=int64#1)
2390 # asm 2: movq <rz0=%rcx,80(<rp=%rdi)
2393 # qhasm: *(uint64 *)(rp + 88) = rz1
2394 # asm 1: movq <rz1=int64#6,88(<rp=int64#1)
2395 # asm 2: movq <rz1=%r9,88(<rp=%rdi)
2398 # qhasm: *(uint64 *)(rp + 96) = rz2
2399 # asm 1: movq <rz2=int64#7,96(<rp=int64#1)
2400 # asm 2: movq <rz2=%rax,96(<rp=%rdi)
2403 # qhasm: *(uint64 *)(rp + 104) = rz3
2404 # asm 1: movq <rz3=int64#8,104(<rp=int64#1)
2405 # asm 2: movq <rz3=%r10,104(<rp=%rdi)
2408 # qhasm: *(uint64 *)(rp + 112) = rz4
2409 # asm 1: movq <rz4=int64#9,112(<rp=int64#1)
2410 # asm 2: movq <rz4=%r11,112(<rp=%rdi)
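#
# The five reduced limbs of this product are now stored at rp+80..112.
# ge25519_p1p1_to_p3 performs four field multiplications in total (the
# extended-coordinate products X*T, Y*Z, Z*T and X*Y); the block below
# computes the last of them, multiplying the limbs at pp+0..32 by those
# at pp+80..112 with the same schoolbook-plus-19 pattern and storing
# the result at rp+120..152.
#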
2413 # qhasm: mulrax = *(uint64 *)(pp + 24)
2414 # asm 1: movq 24(<pp=int64#2),>mulrax=int64#3
2415 # asm 2: movq 24(<pp=%rsi),>mulrax=%rdx
2418 # qhasm: mulrax *= 19
2419 # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
2420 # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
2423 # qhasm: mulx319_stack = mulrax
2424 # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
2425 # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
2428 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
2429 # asm 1: mulq 96(<pp=int64#2)
2430 # asm 2: mulq 96(<pp=%rsi)
2433 # qhasm: rt0 = mulrax
2434 # asm 1: mov <mulrax=int64#7,>rt0=int64#4
2435 # asm 2: mov <mulrax=%rax,>rt0=%rcx
2438 # qhasm: mulr01 = mulrdx
2439 # asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
2440 # asm 2: mov <mulrdx=%rdx,>mulr01=%r8
2443 # qhasm: mulrax = *(uint64 *)(pp + 32)
2444 # asm 1: movq 32(<pp=int64#2),>mulrax=int64#3
2445 # asm 2: movq 32(<pp=%rsi),>mulrax=%rdx
2448 # qhasm: mulrax *= 19
2449 # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
2450 # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
2453 # qhasm: mulx419_stack = mulrax
2454 # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
2455 # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
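#
# The imulq/spill pairs above precompute 19*a3 (mulx319_stack) and
# 19*a4 (mulx419_stack), where a0..a4 are the limbs at pp+0..32.  A
# term a_i*b_j carries weight 2^(51*(i+j)), and 2^255 = 19 modulo
# 2^255-19, so every term with i+j >= 5 is folded in as 19*a_i*b_j at
# weight 2^(51*(i+j-5)).  a3 and a4 each occur in several such terms,
# hence the scaled copies are computed once and parked on the stack.
#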
2458 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
2459 # asm 1: mulq 88(<pp=int64#2)
2460 # asm 2: mulq 88(<pp=%rsi)
2463 # qhasm: carry? rt0 += mulrax
2464 # asm 1: add <mulrax=int64#7,<rt0=int64#4
2465 # asm 2: add <mulrax=%rax,<rt0=%rcx
2468 # qhasm: mulr01 += mulrdx + carry
2469 # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
2470 # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
2473 # qhasm: mulrax = *(uint64 *)(pp + 0)
2474 # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
2475 # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
2478 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
2479 # asm 1: mulq 80(<pp=int64#2)
2480 # asm 2: mulq 80(<pp=%rsi)
2483 # qhasm: carry? rt0 += mulrax
2484 # asm 1: add <mulrax=int64#7,<rt0=int64#4
2485 # asm 2: add <mulrax=%rax,<rt0=%rcx
2488 # qhasm: mulr01 += mulrdx + carry
2489 # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
2490 # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
2493 # qhasm: mulrax = *(uint64 *)(pp + 0)
2494 # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
2495 # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
2498 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
2499 # asm 1: mulq 88(<pp=int64#2)
2500 # asm 2: mulq 88(<pp=%rsi)
2503 # qhasm: rt1 = mulrax
2504 # asm 1: mov <mulrax=int64#7,>rt1=int64#6
2505 # asm 2: mov <mulrax=%rax,>rt1=%r9
2508 # qhasm: mulr11 = mulrdx
2509 # asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
2510 # asm 2: mov <mulrdx=%rdx,>mulr11=%r10
2513 # qhasm: mulrax = *(uint64 *)(pp + 0)
2514 # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
2515 # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
2518 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
2519 # asm 1: mulq 96(<pp=int64#2)
2520 # asm 2: mulq 96(<pp=%rsi)
2523 # qhasm: rt2 = mulrax
2524 # asm 1: mov <mulrax=int64#7,>rt2=int64#9
2525 # asm 2: mov <mulrax=%rax,>rt2=%r11
2528 # qhasm: mulr21 = mulrdx
2529 # asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
2530 # asm 2: mov <mulrdx=%rdx,>mulr21=%r12
2533 # qhasm: mulrax = *(uint64 *)(pp + 0)
2534 # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
2535 # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
2538 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
2539 # asm 1: mulq 104(<pp=int64#2)
2540 # asm 2: mulq 104(<pp=%rsi)
2543 # qhasm: rt3 = mulrax
2544 # asm 1: mov <mulrax=int64#7,>rt3=int64#11
2545 # asm 2: mov <mulrax=%rax,>rt3=%r13
2548 # qhasm: mulr31 = mulrdx
2549 # asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
2550 # asm 2: mov <mulrdx=%rdx,>mulr31=%r14
2553 # qhasm: mulrax = *(uint64 *)(pp + 0)
2554 # asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
2555 # asm 2: movq 0(<pp=%rsi),>mulrax=%rax
2558 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
2559 # asm 1: mulq 112(<pp=int64#2)
2560 # asm 2: mulq 112(<pp=%rsi)
2563 # qhasm: rt4 = mulrax
2564 # asm 1: mov <mulrax=int64#7,>rt4=int64#13
2565 # asm 2: mov <mulrax=%rax,>rt4=%r15
2568 # qhasm: mulr41 = mulrdx
2569 # asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
2570 # asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
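#
# Every result limb is now seeded: rt0 holds the wrapped terms
# 19*a3*b2 and 19*a4*b1 plus a0*b0, and rt1..rt4 hold a0*b1..a0*b4.
# The rows below add the remaining cross terms a1..a4 times b_j,
# pre-scaled by 19 whenever the limb indices sum to 5 or more.
#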
2573 # qhasm: mulrax = *(uint64 *)(pp + 8)
2574 # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
2575 # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
2578 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
2579 # asm 1: mulq 80(<pp=int64#2)
2580 # asm 2: mulq 80(<pp=%rsi)
2583 # qhasm: carry? rt1 += mulrax
2584 # asm 1: add <mulrax=int64#7,<rt1=int64#6
2585 # asm 2: add <mulrax=%rax,<rt1=%r9
2588 # qhasm: mulr11 += mulrdx + carry
2589 # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
2590 # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
2593 # qhasm: mulrax = *(uint64 *)(pp + 8)
2594 # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
2595 # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
2598 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
2599 # asm 1: mulq 88(<pp=int64#2)
2600 # asm 2: mulq 88(<pp=%rsi)
2603 # qhasm: carry? rt2 += mulrax
2604 # asm 1: add <mulrax=int64#7,<rt2=int64#9
2605 # asm 2: add <mulrax=%rax,<rt2=%r11
2608 # qhasm: mulr21 += mulrdx + carry
2609 # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
2610 # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
2613 # qhasm: mulrax = *(uint64 *)(pp + 8)
2614 # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
2615 # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
2618 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
2619 # asm 1: mulq 96(<pp=int64#2)
2620 # asm 2: mulq 96(<pp=%rsi)
2623 # qhasm: carry? rt3 += mulrax
2624 # asm 1: add <mulrax=int64#7,<rt3=int64#11
2625 # asm 2: add <mulrax=%rax,<rt3=%r13
2628 # qhasm: mulr31 += mulrdx + carry
2629 # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
2630 # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
2633 # qhasm: mulrax = *(uint64 *)(pp + 8)
2634 # asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
2635 # asm 2: movq 8(<pp=%rsi),>mulrax=%rax
2638 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
2639 # asm 1: mulq 104(<pp=int64#2)
2640 # asm 2: mulq 104(<pp=%rsi)
2643 # qhasm: carry? rt4 += mulrax
2644 # asm 1: add <mulrax=int64#7,<rt4=int64#13
2645 # asm 2: add <mulrax=%rax,<rt4=%r15
2648 # qhasm: mulr41 += mulrdx + carry
2649 # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
2650 # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
2653 # qhasm: mulrax = *(uint64 *)(pp + 8)
2654 # asm 1: movq 8(<pp=int64#2),>mulrax=int64#3
2655 # asm 2: movq 8(<pp=%rsi),>mulrax=%rdx
2658 # qhasm: mulrax *= 19
2659 # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
2660 # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
2663 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
2664 # asm 1: mulq 112(<pp=int64#2)
2665 # asm 2: mulq 112(<pp=%rsi)
2668 # qhasm: carry? rt0 += mulrax
2669 # asm 1: add <mulrax=int64#7,<rt0=int64#4
2670 # asm 2: add <mulrax=%rax,<rt0=%rcx
2673 # qhasm: mulr01 += mulrdx + carry
2674 # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
2675 # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
2678 # qhasm: mulrax = *(uint64 *)(pp + 16)
2679 # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
2680 # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
2683 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
2684 # asm 1: mulq 80(<pp=int64#2)
2685 # asm 2: mulq 80(<pp=%rsi)
2688 # qhasm: carry? rt2 += mulrax
2689 # asm 1: add <mulrax=int64#7,<rt2=int64#9
2690 # asm 2: add <mulrax=%rax,<rt2=%r11
2693 # qhasm: mulr21 += mulrdx + carry
2694 # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
2695 # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
2698 # qhasm: mulrax = *(uint64 *)(pp + 16)
2699 # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
2700 # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
2703 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
2704 # asm 1: mulq 88(<pp=int64#2)
2705 # asm 2: mulq 88(<pp=%rsi)
2708 # qhasm: carry? rt3 += mulrax
2709 # asm 1: add <mulrax=int64#7,<rt3=int64#11
2710 # asm 2: add <mulrax=%rax,<rt3=%r13
2713 # qhasm: mulr31 += mulrdx + carry
2714 # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
2715 # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
2718 # qhasm: mulrax = *(uint64 *)(pp + 16)
2719 # asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
2720 # asm 2: movq 16(<pp=%rsi),>mulrax=%rax
2723 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
2724 # asm 1: mulq 96(<pp=int64#2)
2725 # asm 2: mulq 96(<pp=%rsi)
2728 # qhasm: carry? rt4 += mulrax
2729 # asm 1: add <mulrax=int64#7,<rt4=int64#13
2730 # asm 2: add <mulrax=%rax,<rt4=%r15
2733 # qhasm: mulr41 += mulrdx + carry
2734 # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
2735 # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
2738 # qhasm: mulrax = *(uint64 *)(pp + 16)
2739 # asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
2740 # asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
2743 # qhasm: mulrax *= 19
2744 # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
2745 # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
2748 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
2749 # asm 1: mulq 104(<pp=int64#2)
2750 # asm 2: mulq 104(<pp=%rsi)
2753 # qhasm: carry? rt0 += mulrax
2754 # asm 1: add <mulrax=int64#7,<rt0=int64#4
2755 # asm 2: add <mulrax=%rax,<rt0=%rcx
2758 # qhasm: mulr01 += mulrdx + carry
2759 # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
2760 # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
2763 # qhasm: mulrax = *(uint64 *)(pp + 16)
2764 # asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
2765 # asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
2768 # qhasm: mulrax *= 19
2769 # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
2770 # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
2773 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
2774 # asm 1: mulq 112(<pp=int64#2)
2775 # asm 2: mulq 112(<pp=%rsi)
2778 # qhasm: carry? rt1 += mulrax
2779 # asm 1: add <mulrax=int64#7,<rt1=int64#6
2780 # asm 2: add <mulrax=%rax,<rt1=%r9
2783 # qhasm: mulr11 += mulrdx + carry
2784 # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
2785 # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
2788 # qhasm: mulrax = *(uint64 *)(pp + 24)
2789 # asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
2790 # asm 2: movq 24(<pp=%rsi),>mulrax=%rax
2793 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
2794 # asm 1: mulq 80(<pp=int64#2)
2795 # asm 2: mulq 80(<pp=%rsi)
2798 # qhasm: carry? rt3 += mulrax
2799 # asm 1: add <mulrax=int64#7,<rt3=int64#11
2800 # asm 2: add <mulrax=%rax,<rt3=%r13
2803 # qhasm: mulr31 += mulrdx + carry
2804 # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
2805 # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
2808 # qhasm: mulrax = *(uint64 *)(pp + 24)
2809 # asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
2810 # asm 2: movq 24(<pp=%rsi),>mulrax=%rax
2813 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
2814 # asm 1: mulq 88(<pp=int64#2)
2815 # asm 2: mulq 88(<pp=%rsi)
2818 # qhasm: carry? rt4 += mulrax
2819 # asm 1: add <mulrax=int64#7,<rt4=int64#13
2820 # asm 2: add <mulrax=%rax,<rt4=%r15
2823 # qhasm: mulr41 += mulrdx + carry
2824 # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
2825 # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
2828 # qhasm: mulrax = mulx319_stack
2829 # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
2830 # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
2833 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
2834 # asm 1: mulq 104(<pp=int64#2)
2835 # asm 2: mulq 104(<pp=%rsi)
2838 # qhasm: carry? rt1 += mulrax
2839 # asm 1: add <mulrax=int64#7,<rt1=int64#6
2840 # asm 2: add <mulrax=%rax,<rt1=%r9
2843 # qhasm: mulr11 += mulrdx + carry
2844 # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
2845 # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
2848 # qhasm: mulrax = mulx319_stack
2849 # asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
2850 # asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
2853 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
2854 # asm 1: mulq 112(<pp=int64#2)
2855 # asm 2: mulq 112(<pp=%rsi)
2858 # qhasm: carry? rt2 += mulrax
2859 # asm 1: add <mulrax=int64#7,<rt2=int64#9
2860 # asm 2: add <mulrax=%rax,<rt2=%r11
2863 # qhasm: mulr21 += mulrdx + carry
2864 # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
2865 # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
2868 # qhasm: mulrax = *(uint64 *)(pp + 32)
2869 # asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
2870 # asm 2: movq 32(<pp=%rsi),>mulrax=%rax
2873 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
2874 # asm 1: mulq 80(<pp=int64#2)
2875 # asm 2: mulq 80(<pp=%rsi)
2878 # qhasm: carry? rt4 += mulrax
2879 # asm 1: add <mulrax=int64#7,<rt4=int64#13
2880 # asm 2: add <mulrax=%rax,<rt4=%r15
2883 # qhasm: mulr41 += mulrdx + carry
2884 # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
2885 # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
2888 # qhasm: mulrax = mulx419_stack
2889 # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
2890 # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
2893 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
2894 # asm 1: mulq 96(<pp=int64#2)
2895 # asm 2: mulq 96(<pp=%rsi)
2898 # qhasm: carry? rt1 += mulrax
2899 # asm 1: add <mulrax=int64#7,<rt1=int64#6
2900 # asm 2: add <mulrax=%rax,<rt1=%r9
2903 # qhasm: mulr11 += mulrdx + carry
2904 # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
2905 # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
2908 # qhasm: mulrax = mulx419_stack
2909 # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
2910 # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
2913 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
2914 # asm 1: mulq 104(<pp=int64#2)
2915 # asm 2: mulq 104(<pp=%rsi)
2918 # qhasm: carry? rt2 += mulrax
2919 # asm 1: add <mulrax=int64#7,<rt2=int64#9
2920 # asm 2: add <mulrax=%rax,<rt2=%r11
2923 # qhasm: mulr21 += mulrdx + carry
2924 # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
2925 # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
2928 # qhasm: mulrax = mulx419_stack
2929 # asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
2930 # asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
2933 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
2934 # asm 1: mulq 112(<pp=int64#2)
2935 # asm 2: mulq 112(<pp=%rsi)
2938 # qhasm: carry? rt3 += mulrax
2939 # asm 1: add <mulrax=int64#7,<rt3=int64#11
2940 # asm 2: add <mulrax=%rax,<rt3=%r13
2943 # qhasm: mulr31 += mulrdx + carry
2944 # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
2945 # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
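#
# All 25 partial products of this multiplication are in; the reduction
# below is the same shld/mask/fold-by-19 sequence as for the previous
# product, now acting on rt0..rt4 with the overflow words in
# mulr01..mulr41.  mulredmask is reloaded, this time into rsi, which
# appears to be free once the last loads through pp are done.
#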
2948 # qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
2949 # asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
2950 # asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
2951 movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
2953 # qhasm: mulr01 = (mulr01.rt0) << 13
2954 # asm 1: shld $13,<rt0=int64#4,<mulr01=int64#5
2955 # asm 2: shld $13,<rt0=%rcx,<mulr01=%r8
2958 # qhasm: rt0 &= mulredmask
2959 # asm 1: and <mulredmask=int64#2,<rt0=int64#4
2960 # asm 2: and <mulredmask=%rsi,<rt0=%rcx
2963 # qhasm: mulr11 = (mulr11.rt1) << 13
2964 # asm 1: shld $13,<rt1=int64#6,<mulr11=int64#8
2965 # asm 2: shld $13,<rt1=%r9,<mulr11=%r10
2968 # qhasm: rt1 &= mulredmask
2969 # asm 1: and <mulredmask=int64#2,<rt1=int64#6
2970 # asm 2: and <mulredmask=%rsi,<rt1=%r9
2973 # qhasm: rt1 += mulr01
2974 # asm 1: add <mulr01=int64#5,<rt1=int64#6
2975 # asm 2: add <mulr01=%r8,<rt1=%r9
2978 # qhasm: mulr21 = (mulr21.rt2) << 13
2979 # asm 1: shld $13,<rt2=int64#9,<mulr21=int64#10
2980 # asm 2: shld $13,<rt2=%r11,<mulr21=%r12
2983 # qhasm: rt2 &= mulredmask
2984 # asm 1: and <mulredmask=int64#2,<rt2=int64#9
2985 # asm 2: and <mulredmask=%rsi,<rt2=%r11
2988 # qhasm: rt2 += mulr11
2989 # asm 1: add <mulr11=int64#8,<rt2=int64#9
2990 # asm 2: add <mulr11=%r10,<rt2=%r11
2993 # qhasm: mulr31 = (mulr31.rt3) << 13
2994 # asm 1: shld $13,<rt3=int64#11,<mulr31=int64#12
2995 # asm 2: shld $13,<rt3=%r13,<mulr31=%r14
2998 # qhasm: rt3 &= mulredmask
2999 # asm 1: and <mulredmask=int64#2,<rt3=int64#11
3000 # asm 2: and <mulredmask=%rsi,<rt3=%r13
3003 # qhasm: rt3 += mulr21
3004 # asm 1: add <mulr21=int64#10,<rt3=int64#11
3005 # asm 2: add <mulr21=%r12,<rt3=%r13
3008 # qhasm: mulr41 = (mulr41.rt4) << 13
3009 # asm 1: shld $13,<rt4=int64#13,<mulr41=int64#14
3010 # asm 2: shld $13,<rt4=%r15,<mulr41=%rbx
3013 # qhasm: rt4 &= mulredmask
3014 # asm 1: and <mulredmask=int64#2,<rt4=int64#13
3015 # asm 2: and <mulredmask=%rsi,<rt4=%r15
3018 # qhasm: rt4 += mulr31
3019 # asm 1: add <mulr31=int64#12,<rt4=int64#13
3020 # asm 2: add <mulr31=%r14,<rt4=%r15
3023 # qhasm: mulr41 = mulr41 * 19
3024 # asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#3
3025 # asm 2: imulq $19,<mulr41=%rbx,>mulr41=%rdx
3028 # qhasm: rt0 += mulr41
3029 # asm 1: add <mulr41=int64#3,<rt0=int64#4
3030 # asm 2: add <mulr41=%rdx,<rt0=%rcx
3033 # qhasm: mult = rt0
3034 # asm 1: mov <rt0=int64#4,>mult=int64#3
3035 # asm 2: mov <rt0=%rcx,>mult=%rdx
3038 # qhasm: (uint64) mult >>= 51
3039 # asm 1: shr $51,<mult=int64#3
3040 # asm 2: shr $51,<mult=%rdx
3043 # qhasm: mult += rt1
3044 # asm 1: add <rt1=int64#6,<mult=int64#3
3045 # asm 2: add <rt1=%r9,<mult=%rdx
3048 # qhasm: rt1 = mult
3049 # asm 1: mov <mult=int64#3,>rt1=int64#5
3050 # asm 2: mov <mult=%rdx,>rt1=%r8
3053 # qhasm: (uint64) mult >>= 51
3054 # asm 1: shr $51,<mult=int64#3
3055 # asm 2: shr $51,<mult=%rdx
3058 # qhasm: rt0 &= mulredmask
3059 # asm 1: and <mulredmask=int64#2,<rt0=int64#4
3060 # asm 2: and <mulredmask=%rsi,<rt0=%rcx
3063 # qhasm: mult += rt2
3064 # asm 1: add <rt2=int64#9,<mult=int64#3
3065 # asm 2: add <rt2=%r11,<mult=%rdx
3068 # qhasm: rt2 = mult
3069 # asm 1: mov <mult=int64#3,>rt2=int64#6
3070 # asm 2: mov <mult=%rdx,>rt2=%r9
3073 # qhasm: (uint64) mult >>= 51
3074 # asm 1: shr $51,<mult=int64#3
3075 # asm 2: shr $51,<mult=%rdx
3078 # qhasm: rt1 &= mulredmask
3079 # asm 1: and <mulredmask=int64#2,<rt1=int64#5
3080 # asm 2: and <mulredmask=%rsi,<rt1=%r8
3083 # qhasm: mult += rt3
3084 # asm 1: add <rt3=int64#11,<mult=int64#3
3085 # asm 2: add <rt3=%r13,<mult=%rdx
3088 # qhasm: rt3 = mult
3089 # asm 1: mov <mult=int64#3,>rt3=int64#7
3090 # asm 2: mov <mult=%rdx,>rt3=%rax
3093 # qhasm: (uint64) mult >>= 51
3094 # asm 1: shr $51,<mult=int64#3
3095 # asm 2: shr $51,<mult=%rdx
3098 # qhasm: rt2 &= mulredmask
3099 # asm 1: and <mulredmask=int64#2,<rt2=int64#6
3100 # asm 2: and <mulredmask=%rsi,<rt2=%r9
3103 # qhasm: mult += rt4
3104 # asm 1: add <rt4=int64#13,<mult=int64#3
3105 # asm 2: add <rt4=%r15,<mult=%rdx
3108 # qhasm: rt4 = mult
3109 # asm 1: mov <mult=int64#3,>rt4=int64#8
3110 # asm 2: mov <mult=%rdx,>rt4=%r10
3113 # qhasm: (uint64) mult >>= 51
3114 # asm 1: shr $51,<mult=int64#3
3115 # asm 2: shr $51,<mult=%rdx
3118 # qhasm: rt3 &= mulredmask
3119 # asm 1: and <mulredmask=int64#2,<rt3=int64#7
3120 # asm 2: and <mulredmask=%rsi,<rt3=%rax
3123 # qhasm: mult *= 19
3124 # asm 1: imulq $19,<mult=int64#3,>mult=int64#3
3125 # asm 2: imulq $19,<mult=%rdx,>mult=%rdx
3128 # qhasm: rt0 += mult
3129 # asm 1: add <mult=int64#3,<rt0=int64#4
3130 # asm 2: add <mult=%rdx,<rt0=%rcx
3133 # qhasm: rt4 &= mulredmask
3134 # asm 1: and <mulredmask=int64#2,<rt4=int64#8
3135 # asm 2: and <mulredmask=%rsi,<rt4=%r10
3138 # qhasm: *(uint64 *)(rp + 120) = rt0
3139 # asm 1: movq <rt0=int64#4,120(<rp=int64#1)
3140 # asm 2: movq <rt0=%rcx,120(<rp=%rdi)
3143 # qhasm: *(uint64 *)(rp + 128) = rt1
3144 # asm 1: movq <rt1=int64#5,128(<rp=int64#1)
3145 # asm 2: movq <rt1=%r8,128(<rp=%rdi)
3148 # qhasm: *(uint64 *)(rp + 136) = rt2
3149 # asm 1: movq <rt2=int64#6,136(<rp=int64#1)
3150 # asm 2: movq <rt2=%r9,136(<rp=%rdi)
3153 # qhasm: *(uint64 *)(rp + 144) = rt3
3154 # asm 1: movq <rt3=int64#7,144(<rp=int64#1)
3155 # asm 2: movq <rt3=%rax,144(<rp=%rdi)
3158 # qhasm: *(uint64 *)(rp + 152) = rt4
3159 # asm 1: movq <rt4=int64#8,152(<rp=int64#1)
3160 # asm 2: movq <rt4=%r10,152(<rp=%rdi)
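#
# Output complete: the last coordinate's limbs now sit at rp+120..152.
# The movq sequence below restores the seven registers that were
# spilled to the stack in the prologue.
#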
3163 # qhasm: caller1 = caller1_stack
3164 # asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
3165 # asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
3168 # qhasm: caller2 = caller2_stack
3169 # asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
3170 # asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
3173 # qhasm: caller3 = caller3_stack
3174 # asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
3175 # asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
3178 # qhasm: caller4 = caller4_stack
3179 # asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
3180 # asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
3183 # qhasm: caller5 = caller5_stack
3184 # asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
3185 # asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
3188 # qhasm: caller6 = caller6_stack
3189 # asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
3190 # asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
3193 # qhasm: caller7 = caller7_stack
3194 # asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
3195 # asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp