# mkp224o.git: ed25519/amd64-51-30k/ge25519_p1p1_to_p3.s
# qhasm: int64 rp

# qhasm: int64 pp

# qhasm: input rp

# qhasm: input pp

# qhasm: int64 caller1

# qhasm: int64 caller2

# qhasm: int64 caller3

# qhasm: int64 caller4

# qhasm: int64 caller5

# qhasm: int64 caller6

# qhasm: int64 caller7

# qhasm: caller caller1

# qhasm: caller caller2

# qhasm: caller caller3

# qhasm: caller caller4

# qhasm: caller caller5

# qhasm: caller caller6

# qhasm: caller caller7

# qhasm: stack64 caller1_stack

# qhasm: stack64 caller2_stack

# qhasm: stack64 caller3_stack

# qhasm: stack64 caller4_stack

# qhasm: stack64 caller5_stack

# qhasm: stack64 caller6_stack

# qhasm: stack64 caller7_stack

# qhasm: int64 rx0

# qhasm: int64 rx1

# qhasm: int64 rx2

# qhasm: int64 rx3

# qhasm: int64 rx4

# qhasm: int64 ry0

# qhasm: int64 ry1

# qhasm: int64 ry2

# qhasm: int64 ry3

# qhasm: int64 ry4

# qhasm: int64 rz0

# qhasm: int64 rz1

# qhasm: int64 rz2

# qhasm: int64 rz3

# qhasm: int64 rz4

# qhasm: int64 rt0

# qhasm: int64 rt1

# qhasm: int64 rt2

# qhasm: int64 rt3

# qhasm: int64 rt4

# qhasm: int64 mulr01

# qhasm: int64 mulr11

# qhasm: int64 mulr21

# qhasm: int64 mulr31

# qhasm: int64 mulr41

# qhasm: int64 mulrax

# qhasm: int64 mulrdx

# qhasm: int64 mult

# qhasm: int64 mulredmask

# qhasm: stack64 mulx219_stack

# qhasm: stack64 mulx319_stack

# qhasm: stack64 mulx419_stack
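
# ge25519_p1p1_to_p3: convert a point from P1P1 to P3 (extended)
# coordinates with four multiplications in GF(2^255-19):
#   X3 = X*T,  Y3 = Y*Z,  Z3 = Z*T,  T3 = X*Y.
# Field elements are five radix-2^51 limbs (40 bytes each).  Assuming
# the usual amd64-51-30k struct layout, the p1p1 input at pp is ordered
# (x, z, y, t) and the p3 output at rp is ordered (x, y, z, t), which
# matches the operand offsets pp+0, pp+40, pp+80, pp+120 used below.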
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3
.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3
_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3:
crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3:
mov %rsp,%r11
and $31,%r11
add $96,%r11
sub %r11,%rsp
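
# The prologue above rounds %rsp down to a 32-byte boundary and reserves
# at least 96 bytes of scratch: seven 8-byte slots for the registers
# spilled below, plus two slots for the cached 19*limb products.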
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)

# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)

# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)

# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)

# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)

# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)

# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
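
# First product, X3 = X*T: a schoolbook 5x5 limb multiplication.  The
# high limbs at pp+24 and pp+32 are premultiplied by 19 and parked on
# the stack, since partial products that overflow past 2^255 wrap
# around with a factor of 19 (2^255 = 19 mod p).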
# qhasm: mulrax = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 24(<pp=%rsi),>mulrax=%rdx
movq 24(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: mulx319_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
# asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
movq %rax,56(%rsp)

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)

# qhasm: rx0 = mulrax
# asm 1: mov <mulrax=int64#7,>rx0=int64#4
# asm 2: mov <mulrax=%rax,>rx0=%rcx
mov %rax,%rcx

# qhasm: mulr01 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
# asm 2: mov <mulrdx=%rdx,>mulr01=%r8
mov %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 32(<pp=%rsi),>mulrax=%rdx
movq 32(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: mulx419_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
# asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
movq %rax,64(%rsp)

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)

# qhasm: carry? rx0 += mulrax
# asm 1: add <mulrax=int64#7,<rx0=int64#4
# asm 2: add <mulrax=%rax,<rx0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8
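
# Each column of the multiplication is accumulated as a 128-bit value:
# the low half in the rx limb, the high half in the matching mulr*1
# register, extended one add/adc pair at a time.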
# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)

# qhasm: carry? rx0 += mulrax
# asm 1: add <mulrax=int64#7,<rx0=int64#4
# asm 2: add <mulrax=%rax,<rx0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)

# qhasm: rx1 = mulrax
# asm 1: mov <mulrax=int64#7,>rx1=int64#6
# asm 2: mov <mulrax=%rax,>rx1=%r9
mov %rax,%r9

# qhasm: mulr11 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
# asm 2: mov <mulrdx=%rdx,>mulr11=%r10
mov %rdx,%r10

# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)

# qhasm: rx2 = mulrax
# asm 1: mov <mulrax=int64#7,>rx2=int64#9
# asm 2: mov <mulrax=%rax,>rx2=%r11
mov %rax,%r11

# qhasm: mulr21 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
# asm 2: mov <mulrdx=%rdx,>mulr21=%r12
mov %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)

# qhasm: rx3 = mulrax
# asm 1: mov <mulrax=int64#7,>rx3=int64#11
# asm 2: mov <mulrax=%rax,>rx3=%r13
mov %rax,%r13

# qhasm: mulr31 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
# asm 2: mov <mulrdx=%rdx,>mulr31=%r14
mov %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)

# qhasm: rx4 = mulrax
# asm 1: mov <mulrax=int64#7,>rx4=int64#13
# asm 2: mov <mulrax=%rax,>rx4=%r15
mov %rax,%r15

# qhasm: mulr41 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
# asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
mov %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)

# qhasm: carry? rx1 += mulrax
# asm 1: add <mulrax=int64#7,<rx1=int64#6
# asm 2: add <mulrax=%rax,<rx1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)

# qhasm: carry? rx2 += mulrax
# asm 1: add <mulrax=int64#7,<rx2=int64#9
# asm 2: add <mulrax=%rax,<rx2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)

# qhasm: carry? rx3 += mulrax
# asm 1: add <mulrax=int64#7,<rx3=int64#11
# asm 2: add <mulrax=%rax,<rx3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)

# qhasm: carry? rx4 += mulrax
# asm 1: add <mulrax=int64#7,<rx4=int64#13
# asm 2: add <mulrax=%rax,<rx4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 8(<pp=%rsi),>mulrax=%rdx
movq 8(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)

# qhasm: carry? rx0 += mulrax
# asm 1: add <mulrax=int64#7,<rx0=int64#4
# asm 2: add <mulrax=%rax,<rx0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<pp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)

# qhasm: carry? rx2 += mulrax
# asm 1: add <mulrax=int64#7,<rx2=int64#9
# asm 2: add <mulrax=%rax,<rx2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<pp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)

# qhasm: carry? rx3 += mulrax
# asm 1: add <mulrax=int64#7,<rx3=int64#11
# asm 2: add <mulrax=%rax,<rx3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<pp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)

# qhasm: carry? rx4 += mulrax
# asm 1: add <mulrax=int64#7,<rx4=int64#13
# asm 2: add <mulrax=%rax,<rx4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
movq 16(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)

# qhasm: carry? rx0 += mulrax
# asm 1: add <mulrax=int64#7,<rx0=int64#4
# asm 2: add <mulrax=%rax,<rx0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
movq 16(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)

# qhasm: carry? rx1 += mulrax
# asm 1: add <mulrax=int64#7,<rx1=int64#6
# asm 2: add <mulrax=%rax,<rx1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<pp=%rsi),>mulrax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)

# qhasm: carry? rx3 += mulrax
# asm 1: add <mulrax=int64#7,<rx3=int64#11
# asm 2: add <mulrax=%rax,<rx3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<pp=%rsi),>mulrax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)

# qhasm: carry? rx4 += mulrax
# asm 1: add <mulrax=int64#7,<rx4=int64#13
# asm 2: add <mulrax=%rax,<rx4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)

# qhasm: carry? rx1 += mulrax
# asm 1: add <mulrax=int64#7,<rx1=int64#6
# asm 2: add <mulrax=%rax,<rx1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)

# qhasm: carry? rx2 += mulrax
# asm 1: add <mulrax=int64#7,<rx2=int64#9
# asm 2: add <mulrax=%rax,<rx2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 32(<pp=%rsi),>mulrax=%rax
movq 32(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)

# qhasm: carry? rx4 += mulrax
# asm 1: add <mulrax=int64#7,<rx4=int64#13
# asm 2: add <mulrax=%rax,<rx4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)

# qhasm: carry? rx1 += mulrax
# asm 1: add <mulrax=int64#7,<rx1=int64#6
# asm 2: add <mulrax=%rax,<rx1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)

# qhasm: carry? rx2 += mulrax
# asm 1: add <mulrax=int64#7,<rx2=int64#9
# asm 2: add <mulrax=%rax,<rx2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)

# qhasm: carry? rx3 += mulrax
# asm 1: add <mulrax=int64#7,<rx3=int64#11
# asm 2: add <mulrax=%rax,<rx3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
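
# Reduction: each column sits in a (rx_i, mulr_i1) 128-bit pair.  shld
# moves everything above bit 51 into mulr_i1, the mask REDMASK51 =
# 2^51-1 trims rx_i, each mulr_i1 is added into the next limb up, and
# the top overflow mulr41 re-enters limb 0 multiplied by 19.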
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx

# qhasm: mulr01 = (mulr01.rx0) << 13
# asm 1: shld $13,<rx0=int64#4,<mulr01=int64#5
# asm 2: shld $13,<rx0=%rcx,<mulr01=%r8
shld $13,%rcx,%r8

# qhasm: rx0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx0=int64#4
# asm 2: and <mulredmask=%rdx,<rx0=%rcx
and %rdx,%rcx

# qhasm: mulr11 = (mulr11.rx1) << 13
# asm 1: shld $13,<rx1=int64#6,<mulr11=int64#8
# asm 2: shld $13,<rx1=%r9,<mulr11=%r10
shld $13,%r9,%r10

# qhasm: rx1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx1=int64#6
# asm 2: and <mulredmask=%rdx,<rx1=%r9
and %rdx,%r9

# qhasm: rx1 += mulr01
# asm 1: add <mulr01=int64#5,<rx1=int64#6
# asm 2: add <mulr01=%r8,<rx1=%r9
add %r8,%r9

# qhasm: mulr21 = (mulr21.rx2) << 13
# asm 1: shld $13,<rx2=int64#9,<mulr21=int64#10
# asm 2: shld $13,<rx2=%r11,<mulr21=%r12
shld $13,%r11,%r12

# qhasm: rx2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx2=int64#9
# asm 2: and <mulredmask=%rdx,<rx2=%r11
and %rdx,%r11

# qhasm: rx2 += mulr11
# asm 1: add <mulr11=int64#8,<rx2=int64#9
# asm 2: add <mulr11=%r10,<rx2=%r11
add %r10,%r11

# qhasm: mulr31 = (mulr31.rx3) << 13
# asm 1: shld $13,<rx3=int64#11,<mulr31=int64#12
# asm 2: shld $13,<rx3=%r13,<mulr31=%r14
shld $13,%r13,%r14

# qhasm: rx3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx3=int64#11
# asm 2: and <mulredmask=%rdx,<rx3=%r13
and %rdx,%r13

# qhasm: rx3 += mulr21
# asm 1: add <mulr21=int64#10,<rx3=int64#11
# asm 2: add <mulr21=%r12,<rx3=%r13
add %r12,%r13

# qhasm: mulr41 = (mulr41.rx4) << 13
# asm 1: shld $13,<rx4=int64#13,<mulr41=int64#14
# asm 2: shld $13,<rx4=%r15,<mulr41=%rbx
shld $13,%r15,%rbx

# qhasm: rx4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx4=int64#13
# asm 2: and <mulredmask=%rdx,<rx4=%r15
and %rdx,%r15

# qhasm: rx4 += mulr31
# asm 1: add <mulr31=int64#12,<rx4=int64#13
# asm 2: add <mulr31=%r14,<rx4=%r15
add %r14,%r15

# qhasm: mulr41 = mulr41 * 19
# asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
# asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
imulq $19,%rbx,%r8

# qhasm: rx0 += mulr41
# asm 1: add <mulr41=int64#5,<rx0=int64#4
# asm 2: add <mulr41=%r8,<rx0=%rcx
add %r8,%rcx
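
# One final sequential carry pass: propagate the 51-bit carries
# rx0 -> rx1 -> rx2 -> rx3 -> rx4, folding the last carry back into
# rx0 as carry*19, leaving the limbs in the usual not-quite-fully-
# reduced radix-2^51 range.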
# qhasm: mult = rx0
# asm 1: mov <rx0=int64#4,>mult=int64#5
# asm 2: mov <rx0=%rcx,>mult=%r8
mov %rcx,%r8

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: mult += rx1
# asm 1: add <rx1=int64#6,<mult=int64#5
# asm 2: add <rx1=%r9,<mult=%r8
add %r9,%r8

# qhasm: rx1 = mult
# asm 1: mov <mult=int64#5,>rx1=int64#6
# asm 2: mov <mult=%r8,>rx1=%r9
mov %r8,%r9

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: rx0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx0=int64#4
# asm 2: and <mulredmask=%rdx,<rx0=%rcx
and %rdx,%rcx

# qhasm: mult += rx2
# asm 1: add <rx2=int64#9,<mult=int64#5
# asm 2: add <rx2=%r11,<mult=%r8
add %r11,%r8

# qhasm: rx2 = mult
# asm 1: mov <mult=int64#5,>rx2=int64#7
# asm 2: mov <mult=%r8,>rx2=%rax
mov %r8,%rax

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: rx1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx1=int64#6
# asm 2: and <mulredmask=%rdx,<rx1=%r9
and %rdx,%r9

# qhasm: mult += rx3
# asm 1: add <rx3=int64#11,<mult=int64#5
# asm 2: add <rx3=%r13,<mult=%r8
add %r13,%r8

# qhasm: rx3 = mult
# asm 1: mov <mult=int64#5,>rx3=int64#8
# asm 2: mov <mult=%r8,>rx3=%r10
mov %r8,%r10

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: rx2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx2=int64#7
# asm 2: and <mulredmask=%rdx,<rx2=%rax
and %rdx,%rax

# qhasm: mult += rx4
# asm 1: add <rx4=int64#13,<mult=int64#5
# asm 2: add <rx4=%r15,<mult=%r8
add %r15,%r8

# qhasm: rx4 = mult
# asm 1: mov <mult=int64#5,>rx4=int64#9
# asm 2: mov <mult=%r8,>rx4=%r11
mov %r8,%r11

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: rx3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx3=int64#8
# asm 2: and <mulredmask=%rdx,<rx3=%r10
and %rdx,%r10

# qhasm: mult *= 19
# asm 1: imulq $19,<mult=int64#5,>mult=int64#5
# asm 2: imulq $19,<mult=%r8,>mult=%r8
imulq $19,%r8,%r8

# qhasm: rx0 += mult
# asm 1: add <mult=int64#5,<rx0=int64#4
# asm 2: add <mult=%r8,<rx0=%rcx
add %r8,%rcx

# qhasm: rx4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rx4=int64#9
# asm 2: and <mulredmask=%rdx,<rx4=%r11
and %rdx,%r11

# qhasm: *(uint64 *)(rp + 0) = rx0
# asm 1: movq <rx0=int64#4,0(<rp=int64#1)
# asm 2: movq <rx0=%rcx,0(<rp=%rdi)
movq %rcx,0(%rdi)

# qhasm: *(uint64 *)(rp + 8) = rx1
# asm 1: movq <rx1=int64#6,8(<rp=int64#1)
# asm 2: movq <rx1=%r9,8(<rp=%rdi)
movq %r9,8(%rdi)

# qhasm: *(uint64 *)(rp + 16) = rx2
# asm 1: movq <rx2=int64#7,16(<rp=int64#1)
# asm 2: movq <rx2=%rax,16(<rp=%rdi)
movq %rax,16(%rdi)

# qhasm: *(uint64 *)(rp + 24) = rx3
# asm 1: movq <rx3=int64#8,24(<rp=int64#1)
# asm 2: movq <rx3=%r10,24(<rp=%rdi)
movq %r10,24(%rdi)

# qhasm: *(uint64 *)(rp + 32) = rx4
# asm 1: movq <rx4=int64#9,32(<rp=int64#1)
# asm 2: movq <rx4=%r11,32(<rp=%rdi)
movq %r11,32(%rdi)
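
# Second product, Y3 = Y*Z: the same multiply-and-reduce pattern, with
# operands at pp+80 (y) and pp+40 (z), result stored to rp+40..72.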
# qhasm: mulrax = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 104(<pp=%rsi),>mulrax=%rdx
movq 104(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: mulx319_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
# asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
movq %rax,56(%rsp)

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
# asm 1: mulq 56(<pp=int64#2)
# asm 2: mulq 56(<pp=%rsi)
mulq 56(%rsi)

# qhasm: ry0 = mulrax
# asm 1: mov <mulrax=int64#7,>ry0=int64#4
# asm 2: mov <mulrax=%rax,>ry0=%rcx
mov %rax,%rcx

# qhasm: mulr01 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
# asm 2: mov <mulrdx=%rdx,>mulr01=%r8
mov %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 112)
# asm 1: movq 112(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 112(<pp=%rsi),>mulrax=%rdx
movq 112(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: mulx419_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
# asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
movq %rax,64(%rsp)

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
# asm 1: mulq 48(<pp=int64#2)
# asm 2: mulq 48(<pp=%rsi)
mulq 48(%rsi)

# qhasm: carry? ry0 += mulrax
# asm 1: add <mulrax=int64#7,<ry0=int64#4
# asm 2: add <mulrax=%rax,<ry0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 80)
# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 80(<pp=%rsi),>mulrax=%rax
movq 80(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
# asm 1: mulq 40(<pp=int64#2)
# asm 2: mulq 40(<pp=%rsi)
mulq 40(%rsi)

# qhasm: carry? ry0 += mulrax
# asm 1: add <mulrax=int64#7,<ry0=int64#4
# asm 2: add <mulrax=%rax,<ry0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 80)
# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 80(<pp=%rsi),>mulrax=%rax
movq 80(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
# asm 1: mulq 48(<pp=int64#2)
# asm 2: mulq 48(<pp=%rsi)
mulq 48(%rsi)

# qhasm: ry1 = mulrax
# asm 1: mov <mulrax=int64#7,>ry1=int64#6
# asm 2: mov <mulrax=%rax,>ry1=%r9
mov %rax,%r9

# qhasm: mulr11 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
# asm 2: mov <mulrdx=%rdx,>mulr11=%r10
mov %rdx,%r10

# qhasm: mulrax = *(uint64 *)(pp + 80)
# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 80(<pp=%rsi),>mulrax=%rax
movq 80(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
# asm 1: mulq 56(<pp=int64#2)
# asm 2: mulq 56(<pp=%rsi)
mulq 56(%rsi)

# qhasm: ry2 = mulrax
# asm 1: mov <mulrax=int64#7,>ry2=int64#9
# asm 2: mov <mulrax=%rax,>ry2=%r11
mov %rax,%r11

# qhasm: mulr21 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
# asm 2: mov <mulrdx=%rdx,>mulr21=%r12
mov %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 80)
# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 80(<pp=%rsi),>mulrax=%rax
movq 80(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
mulq 64(%rsi)

# qhasm: ry3 = mulrax
# asm 1: mov <mulrax=int64#7,>ry3=int64#11
# asm 2: mov <mulrax=%rax,>ry3=%r13
mov %rax,%r13

# qhasm: mulr31 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
# asm 2: mov <mulrdx=%rdx,>mulr31=%r14
mov %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 80)
# asm 1: movq 80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 80(<pp=%rsi),>mulrax=%rax
movq 80(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
mulq 72(%rsi)

# qhasm: ry4 = mulrax
# asm 1: mov <mulrax=int64#7,>ry4=int64#13
# asm 2: mov <mulrax=%rax,>ry4=%r15
mov %rax,%r15

# qhasm: mulr41 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
# asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
mov %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 88)
# asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 88(<pp=%rsi),>mulrax=%rax
movq 88(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
# asm 1: mulq 40(<pp=int64#2)
# asm 2: mulq 40(<pp=%rsi)
mulq 40(%rsi)

# qhasm: carry? ry1 += mulrax
# asm 1: add <mulrax=int64#7,<ry1=int64#6
# asm 2: add <mulrax=%rax,<ry1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = *(uint64 *)(pp + 88)
# asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 88(<pp=%rsi),>mulrax=%rax
movq 88(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
# asm 1: mulq 48(<pp=int64#2)
# asm 2: mulq 48(<pp=%rsi)
mulq 48(%rsi)

# qhasm: carry? ry2 += mulrax
# asm 1: add <mulrax=int64#7,<ry2=int64#9
# asm 2: add <mulrax=%rax,<ry2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 88)
# asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 88(<pp=%rsi),>mulrax=%rax
movq 88(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
# asm 1: mulq 56(<pp=int64#2)
# asm 2: mulq 56(<pp=%rsi)
mulq 56(%rsi)

# qhasm: carry? ry3 += mulrax
# asm 1: add <mulrax=int64#7,<ry3=int64#11
# asm 2: add <mulrax=%rax,<ry3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 88)
# asm 1: movq 88(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 88(<pp=%rsi),>mulrax=%rax
movq 88(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
mulq 64(%rsi)

# qhasm: carry? ry4 += mulrax
# asm 1: add <mulrax=int64#7,<ry4=int64#13
# asm 2: add <mulrax=%rax,<ry4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 88)
# asm 1: movq 88(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 88(<pp=%rsi),>mulrax=%rdx
movq 88(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
mulq 72(%rsi)

# qhasm: carry? ry0 += mulrax
# asm 1: add <mulrax=int64#7,<ry0=int64#4
# asm 2: add <mulrax=%rax,<ry0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 96(<pp=%rsi),>mulrax=%rax
movq 96(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
# asm 1: mulq 40(<pp=int64#2)
# asm 2: mulq 40(<pp=%rsi)
mulq 40(%rsi)

# qhasm: carry? ry2 += mulrax
# asm 1: add <mulrax=int64#7,<ry2=int64#9
# asm 2: add <mulrax=%rax,<ry2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 96(<pp=%rsi),>mulrax=%rax
movq 96(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
# asm 1: mulq 48(<pp=int64#2)
# asm 2: mulq 48(<pp=%rsi)
mulq 48(%rsi)

# qhasm: carry? ry3 += mulrax
# asm 1: add <mulrax=int64#7,<ry3=int64#11
# asm 2: add <mulrax=%rax,<ry3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 96(<pp=%rsi),>mulrax=%rax
movq 96(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
# asm 1: mulq 56(<pp=int64#2)
# asm 2: mulq 56(<pp=%rsi)
mulq 56(%rsi)

# qhasm: carry? ry4 += mulrax
# asm 1: add <mulrax=int64#7,<ry4=int64#13
# asm 2: add <mulrax=%rax,<ry4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 96(<pp=%rsi),>mulrax=%rdx
movq 96(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
mulq 64(%rsi)

# qhasm: carry? ry0 += mulrax
# asm 1: add <mulrax=int64#7,<ry0=int64#4
# asm 2: add <mulrax=%rax,<ry0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 96)
# asm 1: movq 96(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 96(<pp=%rsi),>mulrax=%rdx
movq 96(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
mulq 72(%rsi)

# qhasm: carry? ry1 += mulrax
# asm 1: add <mulrax=int64#7,<ry1=int64#6
# asm 2: add <mulrax=%rax,<ry1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 104(<pp=%rsi),>mulrax=%rax
movq 104(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
# asm 1: mulq 40(<pp=int64#2)
# asm 2: mulq 40(<pp=%rsi)
mulq 40(%rsi)

# qhasm: carry? ry3 += mulrax
# asm 1: add <mulrax=int64#7,<ry3=int64#11
# asm 2: add <mulrax=%rax,<ry3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 104)
# asm 1: movq 104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 104(<pp=%rsi),>mulrax=%rax
movq 104(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48)
# asm 1: mulq 48(<pp=int64#2)
# asm 2: mulq 48(<pp=%rsi)
mulq 48(%rsi)

# qhasm: carry? ry4 += mulrax
# asm 1: add <mulrax=int64#7,<ry4=int64#13
# asm 2: add <mulrax=%rax,<ry4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
mulq 64(%rsi)

# qhasm: carry? ry1 += mulrax
# asm 1: add <mulrax=int64#7,<ry1=int64#6
# asm 2: add <mulrax=%rax,<ry1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
mulq 72(%rsi)

# qhasm: carry? ry2 += mulrax
# asm 1: add <mulrax=int64#7,<ry2=int64#9
# asm 2: add <mulrax=%rax,<ry2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 112)
# asm 1: movq 112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 112(<pp=%rsi),>mulrax=%rax
movq 112(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40)
# asm 1: mulq 40(<pp=int64#2)
# asm 2: mulq 40(<pp=%rsi)
mulq 40(%rsi)

# qhasm: carry? ry4 += mulrax
# asm 1: add <mulrax=int64#7,<ry4=int64#13
# asm 2: add <mulrax=%rax,<ry4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56)
# asm 1: mulq 56(<pp=int64#2)
# asm 2: mulq 56(<pp=%rsi)
mulq 56(%rsi)

# qhasm: carry? ry1 += mulrax
# asm 1: add <mulrax=int64#7,<ry1=int64#6
# asm 2: add <mulrax=%rax,<ry1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64)
# asm 1: mulq 64(<pp=int64#2)
# asm 2: mulq 64(<pp=%rsi)
mulq 64(%rsi)

# qhasm: carry? ry2 += mulrax
# asm 1: add <mulrax=int64#7,<ry2=int64#9
# asm 2: add <mulrax=%rax,<ry2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72)
# asm 1: mulq 72(<pp=int64#2)
# asm 2: mulq 72(<pp=%rsi)
mulq 72(%rsi)

# qhasm: carry? ry3 += mulrax
# asm 1: add <mulrax=int64#7,<ry3=int64#11
# asm 2: add <mulrax=%rax,<ry3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx

# qhasm: mulr01 = (mulr01.ry0) << 13
# asm 1: shld $13,<ry0=int64#4,<mulr01=int64#5
# asm 2: shld $13,<ry0=%rcx,<mulr01=%r8
shld $13,%rcx,%r8

# qhasm: ry0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry0=int64#4
# asm 2: and <mulredmask=%rdx,<ry0=%rcx
and %rdx,%rcx

# qhasm: mulr11 = (mulr11.ry1) << 13
# asm 1: shld $13,<ry1=int64#6,<mulr11=int64#8
# asm 2: shld $13,<ry1=%r9,<mulr11=%r10
shld $13,%r9,%r10

# qhasm: ry1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry1=int64#6
# asm 2: and <mulredmask=%rdx,<ry1=%r9
and %rdx,%r9

# qhasm: ry1 += mulr01
# asm 1: add <mulr01=int64#5,<ry1=int64#6
# asm 2: add <mulr01=%r8,<ry1=%r9
add %r8,%r9

# qhasm: mulr21 = (mulr21.ry2) << 13
# asm 1: shld $13,<ry2=int64#9,<mulr21=int64#10
# asm 2: shld $13,<ry2=%r11,<mulr21=%r12
shld $13,%r11,%r12

# qhasm: ry2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry2=int64#9
# asm 2: and <mulredmask=%rdx,<ry2=%r11
and %rdx,%r11

# qhasm: ry2 += mulr11
# asm 1: add <mulr11=int64#8,<ry2=int64#9
# asm 2: add <mulr11=%r10,<ry2=%r11
add %r10,%r11

# qhasm: mulr31 = (mulr31.ry3) << 13
# asm 1: shld $13,<ry3=int64#11,<mulr31=int64#12
# asm 2: shld $13,<ry3=%r13,<mulr31=%r14
shld $13,%r13,%r14

# qhasm: ry3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry3=int64#11
# asm 2: and <mulredmask=%rdx,<ry3=%r13
and %rdx,%r13

# qhasm: ry3 += mulr21
# asm 1: add <mulr21=int64#10,<ry3=int64#11
# asm 2: add <mulr21=%r12,<ry3=%r13
add %r12,%r13

# qhasm: mulr41 = (mulr41.ry4) << 13
# asm 1: shld $13,<ry4=int64#13,<mulr41=int64#14
# asm 2: shld $13,<ry4=%r15,<mulr41=%rbx
shld $13,%r15,%rbx

# qhasm: ry4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry4=int64#13
# asm 2: and <mulredmask=%rdx,<ry4=%r15
and %rdx,%r15

# qhasm: ry4 += mulr31
# asm 1: add <mulr31=int64#12,<ry4=int64#13
# asm 2: add <mulr31=%r14,<ry4=%r15
add %r14,%r15

# qhasm: mulr41 = mulr41 * 19
# asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
# asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
imulq $19,%rbx,%r8

# qhasm: ry0 += mulr41
# asm 1: add <mulr41=int64#5,<ry0=int64#4
# asm 2: add <mulr41=%r8,<ry0=%rcx
add %r8,%rcx

# qhasm: mult = ry0
# asm 1: mov <ry0=int64#4,>mult=int64#5
# asm 2: mov <ry0=%rcx,>mult=%r8
mov %rcx,%r8

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: mult += ry1
# asm 1: add <ry1=int64#6,<mult=int64#5
# asm 2: add <ry1=%r9,<mult=%r8
add %r9,%r8

# qhasm: ry1 = mult
# asm 1: mov <mult=int64#5,>ry1=int64#6
# asm 2: mov <mult=%r8,>ry1=%r9
mov %r8,%r9

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: ry0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry0=int64#4
# asm 2: and <mulredmask=%rdx,<ry0=%rcx
and %rdx,%rcx

# qhasm: mult += ry2
# asm 1: add <ry2=int64#9,<mult=int64#5
# asm 2: add <ry2=%r11,<mult=%r8
add %r11,%r8

# qhasm: ry2 = mult
# asm 1: mov <mult=int64#5,>ry2=int64#7
# asm 2: mov <mult=%r8,>ry2=%rax
mov %r8,%rax

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: ry1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry1=int64#6
# asm 2: and <mulredmask=%rdx,<ry1=%r9
and %rdx,%r9

# qhasm: mult += ry3
# asm 1: add <ry3=int64#11,<mult=int64#5
# asm 2: add <ry3=%r13,<mult=%r8
add %r13,%r8

# qhasm: ry3 = mult
# asm 1: mov <mult=int64#5,>ry3=int64#8
# asm 2: mov <mult=%r8,>ry3=%r10
mov %r8,%r10

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: ry2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry2=int64#7
# asm 2: and <mulredmask=%rdx,<ry2=%rax
and %rdx,%rax

# qhasm: mult += ry4
# asm 1: add <ry4=int64#13,<mult=int64#5
# asm 2: add <ry4=%r15,<mult=%r8
add %r15,%r8

# qhasm: ry4 = mult
# asm 1: mov <mult=int64#5,>ry4=int64#9
# asm 2: mov <mult=%r8,>ry4=%r11
mov %r8,%r11

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: ry3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry3=int64#8
# asm 2: and <mulredmask=%rdx,<ry3=%r10
and %rdx,%r10

# qhasm: mult *= 19
# asm 1: imulq $19,<mult=int64#5,>mult=int64#5
# asm 2: imulq $19,<mult=%r8,>mult=%r8
imulq $19,%r8,%r8

# qhasm: ry0 += mult
# asm 1: add <mult=int64#5,<ry0=int64#4
# asm 2: add <mult=%r8,<ry0=%rcx
add %r8,%rcx

# qhasm: ry4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<ry4=int64#9
# asm 2: and <mulredmask=%rdx,<ry4=%r11
and %rdx,%r11

# qhasm: *(uint64 *)(rp + 40) = ry0
# asm 1: movq <ry0=int64#4,40(<rp=int64#1)
# asm 2: movq <ry0=%rcx,40(<rp=%rdi)
movq %rcx,40(%rdi)

# qhasm: *(uint64 *)(rp + 48) = ry1
# asm 1: movq <ry1=int64#6,48(<rp=int64#1)
# asm 2: movq <ry1=%r9,48(<rp=%rdi)
movq %r9,48(%rdi)

# qhasm: *(uint64 *)(rp + 56) = ry2
# asm 1: movq <ry2=int64#7,56(<rp=int64#1)
# asm 2: movq <ry2=%rax,56(<rp=%rdi)
movq %rax,56(%rdi)

# qhasm: *(uint64 *)(rp + 64) = ry3
# asm 1: movq <ry3=int64#8,64(<rp=int64#1)
# asm 2: movq <ry3=%r10,64(<rp=%rdi)
movq %r10,64(%rdi)

# qhasm: *(uint64 *)(rp + 72) = ry4
# asm 1: movq <ry4=int64#9,72(<rp=int64#1)
# asm 2: movq <ry4=%r11,72(<rp=%rdi)
movq %r11,72(%rdi)
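
# Third product, Z3 = Z*T: the same pattern once more, with operands at
# pp+40 (z) and pp+120 (t).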
1663 # qhasm: mulrax = *(uint64 *)(pp + 64)
1664 # asm 1: movq 64(<pp=int64#2),>mulrax=int64#3
1665 # asm 2: movq 64(<pp=%rsi),>mulrax=%rdx
1666 movq 64(%rsi),%rdx
1668 # qhasm: mulrax *= 19
1669 # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
1670 # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
1671 imulq $19,%rdx,%rax
1673 # qhasm: mulx319_stack = mulrax
1674 # asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
1675 # asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
1676 movq %rax,56(%rsp)
1678 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
1679 # asm 1: mulq 136(<pp=int64#2)
1680 # asm 2: mulq 136(<pp=%rsi)
1681 mulq 136(%rsi)
1683 # qhasm: rz0 = mulrax
1684 # asm 1: mov <mulrax=int64#7,>rz0=int64#4
1685 # asm 2: mov <mulrax=%rax,>rz0=%rcx
1686 mov %rax,%rcx
1688 # qhasm: mulr01 = mulrdx
1689 # asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
1690 # asm 2: mov <mulrdx=%rdx,>mulr01=%r8
1691 mov %rdx,%r8
1693 # qhasm: mulrax = *(uint64 *)(pp + 72)
1694 # asm 1: movq 72(<pp=int64#2),>mulrax=int64#3
1695 # asm 2: movq 72(<pp=%rsi),>mulrax=%rdx
1696 movq 72(%rsi),%rdx
1698 # qhasm: mulrax *= 19
1699 # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
1700 # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
1701 imulq $19,%rdx,%rax
1703 # qhasm: mulx419_stack = mulrax
1704 # asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
1705 # asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
1706 movq %rax,64(%rsp)
1708 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
1709 # asm 1: mulq 128(<pp=int64#2)
1710 # asm 2: mulq 128(<pp=%rsi)
1711 mulq 128(%rsi)
1713 # qhasm: carry? rz0 += mulrax
1714 # asm 1: add <mulrax=int64#7,<rz0=int64#4
1715 # asm 2: add <mulrax=%rax,<rz0=%rcx
1716 add %rax,%rcx
1718 # qhasm: mulr01 += mulrdx + carry
1719 # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
1720 # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
1721 adc %rdx,%r8
1723 # qhasm: mulrax = *(uint64 *)(pp + 40)
1724 # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
1725 # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
1726 movq 40(%rsi),%rax
1728 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
1729 # asm 1: mulq 120(<pp=int64#2)
1730 # asm 2: mulq 120(<pp=%rsi)
1731 mulq 120(%rsi)
1733 # qhasm: carry? rz0 += mulrax
1734 # asm 1: add <mulrax=int64#7,<rz0=int64#4
1735 # asm 2: add <mulrax=%rax,<rz0=%rcx
1736 add %rax,%rcx
1738 # qhasm: mulr01 += mulrdx + carry
1739 # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
1740 # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
1741 adc %rdx,%r8
1743 # qhasm: mulrax = *(uint64 *)(pp + 40)
1744 # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
1745 # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
1746 movq 40(%rsi),%rax
1748 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
1749 # asm 1: mulq 128(<pp=int64#2)
1750 # asm 2: mulq 128(<pp=%rsi)
1751 mulq 128(%rsi)
1753 # qhasm: rz1 = mulrax
1754 # asm 1: mov <mulrax=int64#7,>rz1=int64#6
1755 # asm 2: mov <mulrax=%rax,>rz1=%r9
1756 mov %rax,%r9
1758 # qhasm: mulr11 = mulrdx
1759 # asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
1760 # asm 2: mov <mulrdx=%rdx,>mulr11=%r10
1761 mov %rdx,%r10
1763 # qhasm: mulrax = *(uint64 *)(pp + 40)
1764 # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
1765 # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
1766 movq 40(%rsi),%rax
1768 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
1769 # asm 1: mulq 136(<pp=int64#2)
1770 # asm 2: mulq 136(<pp=%rsi)
1771 mulq 136(%rsi)
1773 # qhasm: rz2 = mulrax
1774 # asm 1: mov <mulrax=int64#7,>rz2=int64#9
1775 # asm 2: mov <mulrax=%rax,>rz2=%r11
1776 mov %rax,%r11
1778 # qhasm: mulr21 = mulrdx
1779 # asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
1780 # asm 2: mov <mulrdx=%rdx,>mulr21=%r12
1781 mov %rdx,%r12
1783 # qhasm: mulrax = *(uint64 *)(pp + 40)
1784 # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
1785 # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
1786 movq 40(%rsi),%rax
1788 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
1789 # asm 1: mulq 144(<pp=int64#2)
1790 # asm 2: mulq 144(<pp=%rsi)
1791 mulq 144(%rsi)
1793 # qhasm: rz3 = mulrax
1794 # asm 1: mov <mulrax=int64#7,>rz3=int64#11
1795 # asm 2: mov <mulrax=%rax,>rz3=%r13
1796 mov %rax,%r13
1798 # qhasm: mulr31 = mulrdx
1799 # asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
1800 # asm 2: mov <mulrdx=%rdx,>mulr31=%r14
1801 mov %rdx,%r14
1803 # qhasm: mulrax = *(uint64 *)(pp + 40)
1804 # asm 1: movq 40(<pp=int64#2),>mulrax=int64#7
1805 # asm 2: movq 40(<pp=%rsi),>mulrax=%rax
1806 movq 40(%rsi),%rax
1808 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
1809 # asm 1: mulq 152(<pp=int64#2)
1810 # asm 2: mulq 152(<pp=%rsi)
1811 mulq 152(%rsi)
1813 # qhasm: rz4 = mulrax
1814 # asm 1: mov <mulrax=int64#7,>rz4=int64#13
1815 # asm 2: mov <mulrax=%rax,>rz4=%r15
1816 mov %rax,%r15
1818 # qhasm: mulr41 = mulrdx
1819 # asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
1820 # asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
1821 mov %rdx,%rbx
1823 # qhasm: mulrax = *(uint64 *)(pp + 48)
1824 # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
1825 # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
1826 movq 48(%rsi),%rax
1828 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
1829 # asm 1: mulq 120(<pp=int64#2)
1830 # asm 2: mulq 120(<pp=%rsi)
1831 mulq 120(%rsi)
1833 # qhasm: carry? rz1 += mulrax
1834 # asm 1: add <mulrax=int64#7,<rz1=int64#6
1835 # asm 2: add <mulrax=%rax,<rz1=%r9
1836 add %rax,%r9
1838 # qhasm: mulr11 += mulrdx + carry
1839 # asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
1840 # asm 2: adc <mulrdx=%rdx,<mulr11=%r10
1841 adc %rdx,%r10
1843 # qhasm: mulrax = *(uint64 *)(pp + 48)
1844 # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
1845 # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
1846 movq 48(%rsi),%rax
1848 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
1849 # asm 1: mulq 128(<pp=int64#2)
1850 # asm 2: mulq 128(<pp=%rsi)
1851 mulq 128(%rsi)
1853 # qhasm: carry? rz2 += mulrax
1854 # asm 1: add <mulrax=int64#7,<rz2=int64#9
1855 # asm 2: add <mulrax=%rax,<rz2=%r11
1856 add %rax,%r11
1858 # qhasm: mulr21 += mulrdx + carry
1859 # asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
1860 # asm 2: adc <mulrdx=%rdx,<mulr21=%r12
1861 adc %rdx,%r12
1863 # qhasm: mulrax = *(uint64 *)(pp + 48)
1864 # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
1865 # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
1866 movq 48(%rsi),%rax
1868 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
1869 # asm 1: mulq 136(<pp=int64#2)
1870 # asm 2: mulq 136(<pp=%rsi)
1871 mulq 136(%rsi)
1873 # qhasm: carry? rz3 += mulrax
1874 # asm 1: add <mulrax=int64#7,<rz3=int64#11
1875 # asm 2: add <mulrax=%rax,<rz3=%r13
1876 add %rax,%r13
1878 # qhasm: mulr31 += mulrdx + carry
1879 # asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
1880 # asm 2: adc <mulrdx=%rdx,<mulr31=%r14
1881 adc %rdx,%r14
1883 # qhasm: mulrax = *(uint64 *)(pp + 48)
1884 # asm 1: movq 48(<pp=int64#2),>mulrax=int64#7
1885 # asm 2: movq 48(<pp=%rsi),>mulrax=%rax
1886 movq 48(%rsi),%rax
1888 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
1889 # asm 1: mulq 144(<pp=int64#2)
1890 # asm 2: mulq 144(<pp=%rsi)
1891 mulq 144(%rsi)
1893 # qhasm: carry? rz4 += mulrax
1894 # asm 1: add <mulrax=int64#7,<rz4=int64#13
1895 # asm 2: add <mulrax=%rax,<rz4=%r15
1896 add %rax,%r15
1898 # qhasm: mulr41 += mulrdx + carry
1899 # asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
1900 # asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
1901 adc %rdx,%rbx
1903 # qhasm: mulrax = *(uint64 *)(pp + 48)
1904 # asm 1: movq 48(<pp=int64#2),>mulrax=int64#3
1905 # asm 2: movq 48(<pp=%rsi),>mulrax=%rdx
1906 movq 48(%rsi),%rdx
1908 # qhasm: mulrax *= 19
1909 # asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
1910 # asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
1911 imulq $19,%rdx,%rax
1913 # qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
1914 # asm 1: mulq 152(<pp=int64#2)
1915 # asm 2: mulq 152(<pp=%rsi)
1916 mulq 152(%rsi)
1918 # qhasm: carry? rz0 += mulrax
1919 # asm 1: add <mulrax=int64#7,<rz0=int64#4
1920 # asm 2: add <mulrax=%rax,<rz0=%rcx
1921 add %rax,%rcx
1923 # qhasm: mulr01 += mulrdx + carry
1924 # asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
1925 # asm 2: adc <mulrdx=%rdx,<mulr01=%r8
1926 adc %rdx,%r8
# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 56(<pp=%rsi),>mulrax=%rax
movq 56(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)

# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#9
# asm 2: add <mulrax=%rax,<rz2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 56(<pp=%rsi),>mulrax=%rax
movq 56(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)

# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#11
# asm 2: add <mulrax=%rax,<rz3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 56(<pp=%rsi),>mulrax=%rax
movq 56(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)

# qhasm: carry? rz4 += mulrax
# asm 1: add <mulrax=int64#7,<rz4=int64#13
# asm 2: add <mulrax=%rax,<rz4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 56(<pp=%rsi),>mulrax=%rdx
movq 56(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)

# qhasm: carry? rz0 += mulrax
# asm 1: add <mulrax=int64#7,<rz0=int64#4
# asm 2: add <mulrax=%rax,<rz0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 56)
# asm 1: movq 56(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 56(<pp=%rsi),>mulrax=%rdx
movq 56(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)

# qhasm: carry? rz1 += mulrax
# asm 1: add <mulrax=int64#7,<rz1=int64#6
# asm 2: add <mulrax=%rax,<rz1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = *(uint64 *)(pp + 64)
# asm 1: movq 64(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 64(<pp=%rsi),>mulrax=%rax
movq 64(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)

# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#11
# asm 2: add <mulrax=%rax,<rz3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 64)
# asm 1: movq 64(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 64(<pp=%rsi),>mulrax=%rax
movq 64(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128)
# asm 1: mulq 128(<pp=int64#2)
# asm 2: mulq 128(<pp=%rsi)
mulq 128(%rsi)

# qhasm: carry? rz4 += mulrax
# asm 1: add <mulrax=int64#7,<rz4=int64#13
# asm 2: add <mulrax=%rax,<rz4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)

# qhasm: carry? rz1 += mulrax
# asm 1: add <mulrax=int64#7,<rz1=int64#6
# asm 2: add <mulrax=%rax,<rz1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)

# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#9
# asm 2: add <mulrax=%rax,<rz2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12
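
# mulx319_stack (56(%rsp)) and mulx419_stack (64(%rsp)) were filled earlier
# with 19 times limbs 3 and 4 of the left operand, so wrapped products can
# reload them instead of repeating the imulq $19 each time.
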
# qhasm: mulrax = *(uint64 *)(pp + 72)
# asm 1: movq 72(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 72(<pp=%rsi),>mulrax=%rax
movq 72(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120)
# asm 1: mulq 120(<pp=int64#2)
# asm 2: mulq 120(<pp=%rsi)
mulq 120(%rsi)

# qhasm: carry? rz4 += mulrax
# asm 1: add <mulrax=int64#7,<rz4=int64#13
# asm 2: add <mulrax=%rax,<rz4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136)
# asm 1: mulq 136(<pp=int64#2)
# asm 2: mulq 136(<pp=%rsi)
mulq 136(%rsi)

# qhasm: carry? rz1 += mulrax
# asm 1: add <mulrax=int64#7,<rz1=int64#6
# asm 2: add <mulrax=%rax,<rz1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144)
# asm 1: mulq 144(<pp=int64#2)
# asm 2: mulq 144(<pp=%rsi)
mulq 144(%rsi)

# qhasm: carry? rz2 += mulrax
# asm 1: add <mulrax=int64#7,<rz2=int64#9
# asm 2: add <mulrax=%rax,<rz2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152)
# asm 1: mulq 152(<pp=int64#2)
# asm 2: mulq 152(<pp=%rsi)
mulq 152(%rsi)

# qhasm: carry? rz3 += mulrax
# asm 1: add <mulrax=int64#7,<rz3=int64#11
# asm 2: add <mulrax=%rax,<rz3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14
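
# All 25 partial products of this 5x5 limb multiplication have now been
# accumulated. The reduction below brings the five 128-bit column sums
# (mulr_i1:rz_i) back to 51-bit limbs: shld $13 extracts the bits above
# position 51 of each column (exact, as each sum stays well below 2^115 for
# reduced inputs), REDMASK51 = 2^51 - 1 keeps the low 51 bits, and each
# extracted carry is added into the next-higher column.
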
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx

# qhasm: mulr01 = (mulr01.rz0) << 13
# asm 1: shld $13,<rz0=int64#4,<mulr01=int64#5
# asm 2: shld $13,<rz0=%rcx,<mulr01=%r8
shld $13,%rcx,%r8

# qhasm: rz0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz0=int64#4
# asm 2: and <mulredmask=%rdx,<rz0=%rcx
and %rdx,%rcx

# qhasm: mulr11 = (mulr11.rz1) << 13
# asm 1: shld $13,<rz1=int64#6,<mulr11=int64#8
# asm 2: shld $13,<rz1=%r9,<mulr11=%r10
shld $13,%r9,%r10

# qhasm: rz1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz1=int64#6
# asm 2: and <mulredmask=%rdx,<rz1=%r9
and %rdx,%r9

# qhasm: rz1 += mulr01
# asm 1: add <mulr01=int64#5,<rz1=int64#6
# asm 2: add <mulr01=%r8,<rz1=%r9
add %r8,%r9

# qhasm: mulr21 = (mulr21.rz2) << 13
# asm 1: shld $13,<rz2=int64#9,<mulr21=int64#10
# asm 2: shld $13,<rz2=%r11,<mulr21=%r12
shld $13,%r11,%r12

# qhasm: rz2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz2=int64#9
# asm 2: and <mulredmask=%rdx,<rz2=%r11
and %rdx,%r11

# qhasm: rz2 += mulr11
# asm 1: add <mulr11=int64#8,<rz2=int64#9
# asm 2: add <mulr11=%r10,<rz2=%r11
add %r10,%r11

# qhasm: mulr31 = (mulr31.rz3) << 13
# asm 1: shld $13,<rz3=int64#11,<mulr31=int64#12
# asm 2: shld $13,<rz3=%r13,<mulr31=%r14
shld $13,%r13,%r14

# qhasm: rz3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz3=int64#11
# asm 2: and <mulredmask=%rdx,<rz3=%r13
and %rdx,%r13

# qhasm: rz3 += mulr21
# asm 1: add <mulr21=int64#10,<rz3=int64#11
# asm 2: add <mulr21=%r12,<rz3=%r13
add %r12,%r13

# qhasm: mulr41 = (mulr41.rz4) << 13
# asm 1: shld $13,<rz4=int64#13,<mulr41=int64#14
# asm 2: shld $13,<rz4=%r15,<mulr41=%rbx
shld $13,%r15,%rbx

# qhasm: rz4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz4=int64#13
# asm 2: and <mulredmask=%rdx,<rz4=%r15
and %rdx,%r15

# qhasm: rz4 += mulr31
# asm 1: add <mulr31=int64#12,<rz4=int64#13
# asm 2: add <mulr31=%r14,<rz4=%r15
add %r14,%r15

# qhasm: mulr41 = mulr41 * 19
# asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#5
# asm 2: imulq $19,<mulr41=%rbx,>mulr41=%r8
imulq $19,%rbx,%r8

# qhasm: rz0 += mulr41
# asm 1: add <mulr41=int64#5,<rz0=int64#4
# asm 2: add <mulr41=%r8,<rz0=%rcx
add %r8,%rcx
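
# The carry out of the top column wraps to the bottom scaled by 19, again
# using 2^255 = 19 (mod p). The single carry ripple that follows leaves
# every limb at 51 bits (rz0 possibly a hair above after the final fold,
# which this redundant radix-2^51 representation tolerates).
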
# qhasm: mult = rz0
# asm 1: mov <rz0=int64#4,>mult=int64#5
# asm 2: mov <rz0=%rcx,>mult=%r8
mov %rcx,%r8

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: mult += rz1
# asm 1: add <rz1=int64#6,<mult=int64#5
# asm 2: add <rz1=%r9,<mult=%r8
add %r9,%r8

# qhasm: rz1 = mult
# asm 1: mov <mult=int64#5,>rz1=int64#6
# asm 2: mov <mult=%r8,>rz1=%r9
mov %r8,%r9

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: rz0 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz0=int64#4
# asm 2: and <mulredmask=%rdx,<rz0=%rcx
and %rdx,%rcx

# qhasm: mult += rz2
# asm 1: add <rz2=int64#9,<mult=int64#5
# asm 2: add <rz2=%r11,<mult=%r8
add %r11,%r8

# qhasm: rz2 = mult
# asm 1: mov <mult=int64#5,>rz2=int64#7
# asm 2: mov <mult=%r8,>rz2=%rax
mov %r8,%rax

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: rz1 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz1=int64#6
# asm 2: and <mulredmask=%rdx,<rz1=%r9
and %rdx,%r9

# qhasm: mult += rz3
# asm 1: add <rz3=int64#11,<mult=int64#5
# asm 2: add <rz3=%r13,<mult=%r8
add %r13,%r8

# qhasm: rz3 = mult
# asm 1: mov <mult=int64#5,>rz3=int64#8
# asm 2: mov <mult=%r8,>rz3=%r10
mov %r8,%r10

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: rz2 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz2=int64#7
# asm 2: and <mulredmask=%rdx,<rz2=%rax
and %rdx,%rax

# qhasm: mult += rz4
# asm 1: add <rz4=int64#13,<mult=int64#5
# asm 2: add <rz4=%r15,<mult=%r8
add %r15,%r8

# qhasm: rz4 = mult
# asm 1: mov <mult=int64#5,>rz4=int64#9
# asm 2: mov <mult=%r8,>rz4=%r11
mov %r8,%r11

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#5
# asm 2: shr $51,<mult=%r8
shr $51,%r8

# qhasm: rz3 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz3=int64#8
# asm 2: and <mulredmask=%rdx,<rz3=%r10
and %rdx,%r10

# qhasm: mult *= 19
# asm 1: imulq $19,<mult=int64#5,>mult=int64#5
# asm 2: imulq $19,<mult=%r8,>mult=%r8
imulq $19,%r8,%r8

# qhasm: rz0 += mult
# asm 1: add <mult=int64#5,<rz0=int64#4
# asm 2: add <mult=%r8,<rz0=%rcx
add %r8,%rcx

# qhasm: rz4 &= mulredmask
# asm 1: and <mulredmask=int64#3,<rz4=int64#9
# asm 2: and <mulredmask=%rdx,<rz4=%r11
and %rdx,%r11

# qhasm: *(uint64 *)(rp + 80) = rz0
# asm 1: movq <rz0=int64#4,80(<rp=int64#1)
# asm 2: movq <rz0=%rcx,80(<rp=%rdi)
movq %rcx,80(%rdi)

# qhasm: *(uint64 *)(rp + 88) = rz1
# asm 1: movq <rz1=int64#6,88(<rp=int64#1)
# asm 2: movq <rz1=%r9,88(<rp=%rdi)
movq %r9,88(%rdi)

# qhasm: *(uint64 *)(rp + 96) = rz2
# asm 1: movq <rz2=int64#7,96(<rp=int64#1)
# asm 2: movq <rz2=%rax,96(<rp=%rdi)
movq %rax,96(%rdi)

# qhasm: *(uint64 *)(rp + 104) = rz3
# asm 1: movq <rz3=int64#8,104(<rp=int64#1)
# asm 2: movq <rz3=%r10,104(<rp=%rdi)
movq %r10,104(%rdi)

# qhasm: *(uint64 *)(rp + 112) = rz4
# asm 1: movq <rz4=int64#9,112(<rp=int64#1)
# asm 2: movq <rz4=%r11,112(<rp=%rdi)
movq %r11,112(%rdi)
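
# That stores the third field element of the result at rp+80..112. One
# product remains: the limbs at pp+0..32 times those at pp+80..112, written
# to rp+120..152 below; by the p1p1-to-p3 formulas this should be t = x*y,
# assuming the x,z,y,t ordering this file's offsets suggest for ge25519_p1p1.
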
# qhasm: mulrax = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 24(<pp=%rsi),>mulrax=%rdx
movq 24(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: mulx319_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#8
# asm 2: movq <mulrax=%rax,>mulx319_stack=56(%rsp)
movq %rax,56(%rsp)

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
# asm 1: mulq 96(<pp=int64#2)
# asm 2: mulq 96(<pp=%rsi)
mulq 96(%rsi)

# qhasm: rt0 = mulrax
# asm 1: mov <mulrax=int64#7,>rt0=int64#4
# asm 2: mov <mulrax=%rax,>rt0=%rcx
mov %rax,%rcx

# qhasm: mulr01 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr01=int64#5
# asm 2: mov <mulrdx=%rdx,>mulr01=%r8
mov %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 32(<pp=%rsi),>mulrax=%rdx
movq 32(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: mulx419_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#9
# asm 2: movq <mulrax=%rax,>mulx419_stack=64(%rsp)
movq %rax,64(%rsp)
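
# 19 times limbs 3 and 4 of the new left operand (loaded from pp+24 and
# pp+32) are computed up front: each is used immediately for its first
# wrapped product and parked in mulx319_stack/mulx419_stack for reuse
# further down.
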
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
# asm 1: mulq 88(<pp=int64#2)
# asm 2: mulq 88(<pp=%rsi)
mulq 88(%rsi)

# qhasm: carry? rt0 += mulrax
# asm 1: add <mulrax=int64#7,<rt0=int64#4
# asm 2: add <mulrax=%rax,<rt0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
# asm 1: mulq 80(<pp=int64#2)
# asm 2: mulq 80(<pp=%rsi)
mulq 80(%rsi)

# qhasm: carry? rt0 += mulrax
# asm 1: add <mulrax=int64#7,<rt0=int64#4
# asm 2: add <mulrax=%rax,<rt0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
# asm 1: mulq 88(<pp=int64#2)
# asm 2: mulq 88(<pp=%rsi)
mulq 88(%rsi)

# qhasm: rt1 = mulrax
# asm 1: mov <mulrax=int64#7,>rt1=int64#6
# asm 2: mov <mulrax=%rax,>rt1=%r9
mov %rax,%r9

# qhasm: mulr11 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr11=int64#8
# asm 2: mov <mulrdx=%rdx,>mulr11=%r10
mov %rdx,%r10

# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
# asm 1: mulq 96(<pp=int64#2)
# asm 2: mulq 96(<pp=%rsi)
mulq 96(%rsi)

# qhasm: rt2 = mulrax
# asm 1: mov <mulrax=int64#7,>rt2=int64#9
# asm 2: mov <mulrax=%rax,>rt2=%r11
mov %rax,%r11

# qhasm: mulr21 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr21=int64#10
# asm 2: mov <mulrdx=%rdx,>mulr21=%r12
mov %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
# asm 1: mulq 104(<pp=int64#2)
# asm 2: mulq 104(<pp=%rsi)
mulq 104(%rsi)

# qhasm: rt3 = mulrax
# asm 1: mov <mulrax=int64#7,>rt3=int64#11
# asm 2: mov <mulrax=%rax,>rt3=%r13
mov %rax,%r13

# qhasm: mulr31 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr31=int64#12
# asm 2: mov <mulrdx=%rdx,>mulr31=%r14
mov %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 0)
# asm 1: movq 0(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<pp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
# asm 1: mulq 112(<pp=int64#2)
# asm 2: mulq 112(<pp=%rsi)
mulq 112(%rsi)

# qhasm: rt4 = mulrax
# asm 1: mov <mulrax=int64#7,>rt4=int64#13
# asm 2: mov <mulrax=%rax,>rt4=%r15
mov %rax,%r15

# qhasm: mulr41 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr41=int64#14
# asm 2: mov <mulrdx=%rdx,>mulr41=%rbx
mov %rdx,%rbx
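
# The first contribution to each column arrives via mov rather than add:
# rt0/mulr01 from the 19*a3 product above, rt1..rt4 from the a0 row just
# finished. Every remaining stanza accumulates into these with add/adc.
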
# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
# asm 1: mulq 80(<pp=int64#2)
# asm 2: mulq 80(<pp=%rsi)
mulq 80(%rsi)

# qhasm: carry? rt1 += mulrax
# asm 1: add <mulrax=int64#7,<rt1=int64#6
# asm 2: add <mulrax=%rax,<rt1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
# asm 1: mulq 88(<pp=int64#2)
# asm 2: mulq 88(<pp=%rsi)
mulq 88(%rsi)

# qhasm: carry? rt2 += mulrax
# asm 1: add <mulrax=int64#7,<rt2=int64#9
# asm 2: add <mulrax=%rax,<rt2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
# asm 1: mulq 96(<pp=int64#2)
# asm 2: mulq 96(<pp=%rsi)
mulq 96(%rsi)

# qhasm: carry? rt3 += mulrax
# asm 1: add <mulrax=int64#7,<rt3=int64#11
# asm 2: add <mulrax=%rax,<rt3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<pp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
# asm 1: mulq 104(<pp=int64#2)
# asm 2: mulq 104(<pp=%rsi)
mulq 104(%rsi)

# qhasm: carry? rt4 += mulrax
# asm 1: add <mulrax=int64#7,<rt4=int64#13
# asm 2: add <mulrax=%rax,<rt4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 8)
# asm 1: movq 8(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 8(<pp=%rsi),>mulrax=%rdx
movq 8(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
# asm 1: mulq 112(<pp=int64#2)
# asm 2: mulq 112(<pp=%rsi)
mulq 112(%rsi)

# qhasm: carry? rt0 += mulrax
# asm 1: add <mulrax=int64#7,<rt0=int64#4
# asm 2: add <mulrax=%rax,<rt0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<pp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
# asm 1: mulq 80(<pp=int64#2)
# asm 2: mulq 80(<pp=%rsi)
mulq 80(%rsi)

# qhasm: carry? rt2 += mulrax
# asm 1: add <mulrax=int64#7,<rt2=int64#9
# asm 2: add <mulrax=%rax,<rt2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<pp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
# asm 1: mulq 88(<pp=int64#2)
# asm 2: mulq 88(<pp=%rsi)
mulq 88(%rsi)

# qhasm: carry? rt3 += mulrax
# asm 1: add <mulrax=int64#7,<rt3=int64#11
# asm 2: add <mulrax=%rax,<rt3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<pp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
# asm 1: mulq 96(<pp=int64#2)
# asm 2: mulq 96(<pp=%rsi)
mulq 96(%rsi)

# qhasm: carry? rt4 += mulrax
# asm 1: add <mulrax=int64#7,<rt4=int64#13
# asm 2: add <mulrax=%rax,<rt4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
movq 16(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
# asm 1: mulq 104(<pp=int64#2)
# asm 2: mulq 104(<pp=%rsi)
mulq 104(%rsi)

# qhasm: carry? rt0 += mulrax
# asm 1: add <mulrax=int64#7,<rt0=int64#4
# asm 2: add <mulrax=%rax,<rt0=%rcx
add %rax,%rcx

# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
adc %rdx,%r8

# qhasm: mulrax = *(uint64 *)(pp + 16)
# asm 1: movq 16(<pp=int64#2),>mulrax=int64#3
# asm 2: movq 16(<pp=%rsi),>mulrax=%rdx
movq 16(%rsi),%rdx

# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
# asm 1: mulq 112(<pp=int64#2)
# asm 2: mulq 112(<pp=%rsi)
mulq 112(%rsi)

# qhasm: carry? rt1 += mulrax
# asm 1: add <mulrax=int64#7,<rt1=int64#6
# asm 2: add <mulrax=%rax,<rt1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<pp=%rsi),>mulrax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
# asm 1: mulq 80(<pp=int64#2)
# asm 2: mulq 80(<pp=%rsi)
mulq 80(%rsi)

# qhasm: carry? rt3 += mulrax
# asm 1: add <mulrax=int64#7,<rt3=int64#11
# asm 2: add <mulrax=%rax,<rt3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulrax = *(uint64 *)(pp + 24)
# asm 1: movq 24(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<pp=%rsi),>mulrax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88)
# asm 1: mulq 88(<pp=int64#2)
# asm 2: mulq 88(<pp=%rsi)
mulq 88(%rsi)

# qhasm: carry? rt4 += mulrax
# asm 1: add <mulrax=int64#7,<rt4=int64#13
# asm 2: add <mulrax=%rax,<rt4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
# asm 1: mulq 104(<pp=int64#2)
# asm 2: mulq 104(<pp=%rsi)
mulq 104(%rsi)

# qhasm: carry? rt1 += mulrax
# asm 1: add <mulrax=int64#7,<rt1=int64#6
# asm 2: add <mulrax=%rax,<rt1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#8,>mulrax=int64#7
# asm 2: movq <mulx319_stack=56(%rsp),>mulrax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
# asm 1: mulq 112(<pp=int64#2)
# asm 2: mulq 112(<pp=%rsi)
mulq 112(%rsi)

# qhasm: carry? rt2 += mulrax
# asm 1: add <mulrax=int64#7,<rt2=int64#9
# asm 2: add <mulrax=%rax,<rt2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = *(uint64 *)(pp + 32)
# asm 1: movq 32(<pp=int64#2),>mulrax=int64#7
# asm 2: movq 32(<pp=%rsi),>mulrax=%rax
movq 32(%rsi),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80)
# asm 1: mulq 80(<pp=int64#2)
# asm 2: mulq 80(<pp=%rsi)
mulq 80(%rsi)

# qhasm: carry? rt4 += mulrax
# asm 1: add <mulrax=int64#7,<rt4=int64#13
# asm 2: add <mulrax=%rax,<rt4=%r15
add %rax,%r15

# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
adc %rdx,%rbx

# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96)
# asm 1: mulq 96(<pp=int64#2)
# asm 2: mulq 96(<pp=%rsi)
mulq 96(%rsi)

# qhasm: carry? rt1 += mulrax
# asm 1: add <mulrax=int64#7,<rt1=int64#6
# asm 2: add <mulrax=%rax,<rt1=%r9
add %rax,%r9

# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
adc %rdx,%r10

# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104)
# asm 1: mulq 104(<pp=int64#2)
# asm 2: mulq 104(<pp=%rsi)
mulq 104(%rsi)

# qhasm: carry? rt2 += mulrax
# asm 1: add <mulrax=int64#7,<rt2=int64#9
# asm 2: add <mulrax=%rax,<rt2=%r11
add %rax,%r11

# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
adc %rdx,%r12

# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx419_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112)
# asm 1: mulq 112(<pp=int64#2)
# asm 2: mulq 112(<pp=%rsi)
mulq 112(%rsi)

# qhasm: carry? rt3 += mulrax
# asm 1: add <mulrax=int64#7,<rt3=int64#11
# asm 2: add <mulrax=%rax,<rt3=%r13
add %rax,%r13

# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
adc %rdx,%r14

# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
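
# All loads from pp are finished, so pp's register (%rsi) is recycled to
# hold REDMASK51 while the same shld/mask/carry reduction as before runs on
# the rt columns.
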
# qhasm: mulr01 = (mulr01.rt0) << 13
# asm 1: shld $13,<rt0=int64#4,<mulr01=int64#5
# asm 2: shld $13,<rt0=%rcx,<mulr01=%r8
shld $13,%rcx,%r8

# qhasm: rt0 &= mulredmask
# asm 1: and <mulredmask=int64#2,<rt0=int64#4
# asm 2: and <mulredmask=%rsi,<rt0=%rcx
and %rsi,%rcx

# qhasm: mulr11 = (mulr11.rt1) << 13
# asm 1: shld $13,<rt1=int64#6,<mulr11=int64#8
# asm 2: shld $13,<rt1=%r9,<mulr11=%r10
shld $13,%r9,%r10

# qhasm: rt1 &= mulredmask
# asm 1: and <mulredmask=int64#2,<rt1=int64#6
# asm 2: and <mulredmask=%rsi,<rt1=%r9
and %rsi,%r9

# qhasm: rt1 += mulr01
# asm 1: add <mulr01=int64#5,<rt1=int64#6
# asm 2: add <mulr01=%r8,<rt1=%r9
add %r8,%r9

# qhasm: mulr21 = (mulr21.rt2) << 13
# asm 1: shld $13,<rt2=int64#9,<mulr21=int64#10
# asm 2: shld $13,<rt2=%r11,<mulr21=%r12
shld $13,%r11,%r12

# qhasm: rt2 &= mulredmask
# asm 1: and <mulredmask=int64#2,<rt2=int64#9
# asm 2: and <mulredmask=%rsi,<rt2=%r11
and %rsi,%r11

# qhasm: rt2 += mulr11
# asm 1: add <mulr11=int64#8,<rt2=int64#9
# asm 2: add <mulr11=%r10,<rt2=%r11
add %r10,%r11

# qhasm: mulr31 = (mulr31.rt3) << 13
# asm 1: shld $13,<rt3=int64#11,<mulr31=int64#12
# asm 2: shld $13,<rt3=%r13,<mulr31=%r14
shld $13,%r13,%r14

# qhasm: rt3 &= mulredmask
# asm 1: and <mulredmask=int64#2,<rt3=int64#11
# asm 2: and <mulredmask=%rsi,<rt3=%r13
and %rsi,%r13

# qhasm: rt3 += mulr21
# asm 1: add <mulr21=int64#10,<rt3=int64#11
# asm 2: add <mulr21=%r12,<rt3=%r13
add %r12,%r13

# qhasm: mulr41 = (mulr41.rt4) << 13
# asm 1: shld $13,<rt4=int64#13,<mulr41=int64#14
# asm 2: shld $13,<rt4=%r15,<mulr41=%rbx
shld $13,%r15,%rbx

# qhasm: rt4 &= mulredmask
# asm 1: and <mulredmask=int64#2,<rt4=int64#13
# asm 2: and <mulredmask=%rsi,<rt4=%r15
and %rsi,%r15

# qhasm: rt4 += mulr31
# asm 1: add <mulr31=int64#12,<rt4=int64#13
# asm 2: add <mulr31=%r14,<rt4=%r15
add %r14,%r15

# qhasm: mulr41 = mulr41 * 19
# asm 1: imulq $19,<mulr41=int64#14,>mulr41=int64#3
# asm 2: imulq $19,<mulr41=%rbx,>mulr41=%rdx
imulq $19,%rbx,%rdx

# qhasm: rt0 += mulr41
# asm 1: add <mulr41=int64#3,<rt0=int64#4
# asm 2: add <mulr41=%rdx,<rt0=%rcx
add %rdx,%rcx

# qhasm: mult = rt0
# asm 1: mov <rt0=int64#4,>mult=int64#3
# asm 2: mov <rt0=%rcx,>mult=%rdx
mov %rcx,%rdx

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx

# qhasm: mult += rt1
# asm 1: add <rt1=int64#6,<mult=int64#3
# asm 2: add <rt1=%r9,<mult=%rdx
add %r9,%rdx

# qhasm: rt1 = mult
# asm 1: mov <mult=int64#3,>rt1=int64#5
# asm 2: mov <mult=%rdx,>rt1=%r8
mov %rdx,%r8

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx

# qhasm: rt0 &= mulredmask
# asm 1: and <mulredmask=int64#2,<rt0=int64#4
# asm 2: and <mulredmask=%rsi,<rt0=%rcx
and %rsi,%rcx

# qhasm: mult += rt2
# asm 1: add <rt2=int64#9,<mult=int64#3
# asm 2: add <rt2=%r11,<mult=%rdx
add %r11,%rdx

# qhasm: rt2 = mult
# asm 1: mov <mult=int64#3,>rt2=int64#6
# asm 2: mov <mult=%rdx,>rt2=%r9
mov %rdx,%r9

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx

# qhasm: rt1 &= mulredmask
# asm 1: and <mulredmask=int64#2,<rt1=int64#5
# asm 2: and <mulredmask=%rsi,<rt1=%r8
and %rsi,%r8

# qhasm: mult += rt3
# asm 1: add <rt3=int64#11,<mult=int64#3
# asm 2: add <rt3=%r13,<mult=%rdx
add %r13,%rdx

# qhasm: rt3 = mult
# asm 1: mov <mult=int64#3,>rt3=int64#7
# asm 2: mov <mult=%rdx,>rt3=%rax
mov %rdx,%rax

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx

# qhasm: rt2 &= mulredmask
# asm 1: and <mulredmask=int64#2,<rt2=int64#6
# asm 2: and <mulredmask=%rsi,<rt2=%r9
and %rsi,%r9

# qhasm: mult += rt4
# asm 1: add <rt4=int64#13,<mult=int64#3
# asm 2: add <rt4=%r15,<mult=%rdx
add %r15,%rdx

# qhasm: rt4 = mult
# asm 1: mov <mult=int64#3,>rt4=int64#8
# asm 2: mov <mult=%rdx,>rt4=%r10
mov %rdx,%r10

# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx

# qhasm: rt3 &= mulredmask
# asm 1: and <mulredmask=int64#2,<rt3=int64#7
# asm 2: and <mulredmask=%rsi,<rt3=%rax
and %rsi,%rax

# qhasm: mult *= 19
# asm 1: imulq $19,<mult=int64#3,>mult=int64#3
# asm 2: imulq $19,<mult=%rdx,>mult=%rdx
imulq $19,%rdx,%rdx

# qhasm: rt0 += mult
# asm 1: add <mult=int64#3,<rt0=int64#4
# asm 2: add <mult=%rdx,<rt0=%rcx
add %rdx,%rcx

# qhasm: rt4 &= mulredmask
# asm 1: and <mulredmask=int64#2,<rt4=int64#8
# asm 2: and <mulredmask=%rsi,<rt4=%r10
and %rsi,%r10

# qhasm: *(uint64 *)(rp + 120) = rt0
# asm 1: movq <rt0=int64#4,120(<rp=int64#1)
# asm 2: movq <rt0=%rcx,120(<rp=%rdi)
movq %rcx,120(%rdi)

# qhasm: *(uint64 *)(rp + 128) = rt1
# asm 1: movq <rt1=int64#5,128(<rp=int64#1)
# asm 2: movq <rt1=%r8,128(<rp=%rdi)
movq %r8,128(%rdi)

# qhasm: *(uint64 *)(rp + 136) = rt2
# asm 1: movq <rt2=int64#6,136(<rp=int64#1)
# asm 2: movq <rt2=%r9,136(<rp=%rdi)
movq %r9,136(%rdi)

# qhasm: *(uint64 *)(rp + 144) = rt3
# asm 1: movq <rt3=int64#7,144(<rp=int64#1)
# asm 2: movq <rt3=%rax,144(<rp=%rdi)
movq %rax,144(%rdi)

# qhasm: *(uint64 *)(rp + 152) = rt4
# asm 1: movq <rt4=int64#8,152(<rp=int64#1)
# asm 2: movq <rt4=%r10,152(<rp=%rdi)
movq %r10,152(%rdi)
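
# rp+120..152 now holds the last field element of the p3 result; all that
# remains is restoring the callee-save registers spilled in the prologue.
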
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp

# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
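
# qhasm's leave sequence: the value reloaded into %r11 above (stashed in
# caller1_stack by the prologue) is the stack-alignment adjustment, so
# adding it back to %rsp releases the spill area; the moves into %rax and
# %rdx are qhasm's standard epilogue boilerplate.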