# ed25519/amd64-51-30k/ull4_mul.s (from mkp224o.git)
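# Computes the full 512-bit product of two 256-bit integers, each stored
# as four little-endian 64-bit limbs: rp[0..7] = xp[0..3] * yp[0..3].
# Generated by qhasm; arguments arrive per the SysV AMD64 ABI as
# rp=%rdi, xp=%rsi, yp=%rdx.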
# qhasm: int64 rp

# qhasm: int64 xp

# qhasm: int64 yp

# qhasm: input rp

# qhasm: input xp

# qhasm: input yp

# qhasm: int64 r0

# qhasm: int64 r1

# qhasm: int64 r2

# qhasm: int64 r3

# qhasm: int64 r4

# qhasm: int64 r5

# qhasm: int64 r6

# qhasm: int64 r7

# qhasm: int64 c

# qhasm: int64 zero

# qhasm: int64 rax

# qhasm: int64 rdx

# qhasm: int64 caller1

# qhasm: int64 caller2

# qhasm: int64 caller3

# qhasm: int64 caller4

# qhasm: int64 caller5

# qhasm: int64 caller6

# qhasm: int64 caller7

# qhasm: caller caller1

# qhasm: caller caller2

# qhasm: caller caller3

# qhasm: caller caller4

# qhasm: caller caller5

# qhasm: caller caller6

# qhasm: caller caller7

# qhasm: stack64 caller1_stack

# qhasm: stack64 caller2_stack

# qhasm: stack64 caller3_stack

# qhasm: stack64 caller4_stack

# qhasm: stack64 caller5_stack

# qhasm: stack64 caller6_stack

# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
.globl crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
_crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul:
crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
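# The three instructions above carve out an aligned frame: r11 = (rsp & 31) + 64
# is subtracted from rsp, leaving rsp 32-byte aligned with at least 64 bytes
# of scratch space. r11 keeps the exact delta so the epilogue can restore
# rsp with a single add.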
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)

# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)

# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)

# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)

# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)

# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)

# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
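# qhasm's "caller" variables are the registers this function must preserve
# (SysV callee-saved: rbx, rbp, r12..r15). %r11 is spilled as well because
# it still holds the frame-size delta the epilogue needs for "add %r11,%rsp".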
# qhasm: yp = yp
# asm 1: mov <yp=int64#3,>yp=int64#4
# asm 2: mov <yp=%rdx,>yp=%rcx
mov %rdx,%rcx

# qhasm: r4 = 0
# asm 1: mov $0,>r4=int64#5
# asm 2: mov $0,>r4=%r8
mov $0,%r8

# qhasm: r5 = 0
# asm 1: mov $0,>r5=int64#6
# asm 2: mov $0,>r5=%r9
mov $0,%r9

# qhasm: r6 = 0
# asm 1: mov $0,>r6=int64#8
# asm 2: mov $0,>r6=%r10
mov $0,%r10

# qhasm: r7 = 0
# asm 1: mov $0,>r7=int64#9
# asm 2: mov $0,>r7=%r11
mov $0,%r11

# qhasm: zero = 0
# asm 1: mov $0,>zero=int64#10
# asm 2: mov $0,>zero=%r12
mov $0,%r12
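# yp is moved out of %rdx because mulq writes its 128-bit result into
# rdx:rax. r4..r7 start at zero and accumulate the upper limbs; "zero"
# (%r12) is a constant 0 used with adc to fold the carry flag into %rdx.
#
# For reference, a minimal C sketch of the same schoolbook 4x4-limb
# multiply (256x256 -> 512 bits), assuming a compiler with
# unsigned __int128; the name ull4_mul_ref is illustrative, not part of
# this package:
#
#   #include <stdint.h>
#   #include <string.h>
#
#   static void ull4_mul_ref(uint64_t r[8],
#                            const uint64_t x[4], const uint64_t y[4])
#   {
#       memset(r, 0, 8 * sizeof r[0]);
#       for (int i = 0; i < 4; i++) {
#           uint64_t carry = 0;
#           for (int j = 0; j < 4; j++) {
#               /* t <= (2^64-1)^2 + 2*(2^64-1) = 2^128-1: cannot overflow */
#               unsigned __int128 t = (unsigned __int128)x[i] * y[j]
#                                     + r[i + j] + carry;
#               r[i + j] = (uint64_t)t;
#               carry    = (uint64_t)(t >> 64);
#           }
#           r[i + 4] = carry;   /* top limb of row i */
#       }
#   }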
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)

# qhasm: r0 = rax
# asm 1: mov <rax=int64#7,>r0=int64#11
# asm 2: mov <rax=%rax,>r0=%r13
mov %rax,%r13

# qhasm: c = rdx
# asm 1: mov <rdx=int64#3,>c=int64#12
# asm 2: mov <rdx=%rdx,>c=%r14
mov %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)

# qhasm: r1 = rax
# asm 1: mov <rax=int64#7,>r1=int64#13
# asm 2: mov <rax=%rax,>r1=%r15
mov %rax,%r15

# qhasm: carry? r1 += c
# asm 1: add <c=int64#12,<r1=int64#13
# asm 2: add <c=%r14,<r1=%r15
add %r14,%r15

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
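# Carry-capture idiom: the "add" above leaves its carry in CF, c is
# zeroed with mov (which does not touch flags), and a single
# "adc %rdx,%r14" then stores high word + carry into c.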
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)

# qhasm: r2 = rax
# asm 1: mov <rax=int64#7,>r2=int64#14
# asm 2: mov <rax=%rax,>r2=%rbx
mov %rax,%rbx

# qhasm: carry? r2 += c
# asm 1: add <c=int64#12,<r2=int64#14
# asm 2: add <c=%r14,<r2=%rbx
add %r14,%rbx

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)

# qhasm: r3 = rax
# asm 1: mov <rax=int64#7,>r3=int64#15
# asm 2: mov <rax=%rax,>r3=%rbp
mov %rax,%rbp

# qhasm: carry? r3 += c
# asm 1: add <c=int64#12,<r3=int64#15
# asm 2: add <c=%r14,<r3=%rbp
add %r14,%rbp

# qhasm: r4 += rdx + carry
# asm 1: adc <rdx=int64#3,<r4=int64#5
# asm 2: adc <rdx=%rdx,<r4=%r8
adc %rdx,%r8
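# Row x[0] complete: r0..r4 now hold x[0] * y (top limb in r4).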
# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)

# qhasm: carry? r1 += rax
# asm 1: add <rax=int64#7,<r1=int64#13
# asm 2: add <rax=%rax,<r1=%r15
add %rax,%r15

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)

# qhasm: carry? r2 += rax
# asm 1: add <rax=int64#7,<r2=int64#14
# asm 2: add <rax=%rax,<r2=%rbx
add %rax,%rbx

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
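# "rdx += zero + carry" folds the pending carry flag into the product's
# high word. This is safe: the high word of a 64x64-bit product is at
# most 2^64 - 2, so adding 1 cannot wrap.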
# qhasm: carry? r2 += c
# asm 1: add <c=int64#12,<r2=int64#14
# asm 2: add <c=%r14,<r2=%rbx
add %r14,%rbx

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)

# qhasm: carry? r3 += rax
# asm 1: add <rax=int64#7,<r3=int64#15
# asm 2: add <rax=%rax,<r3=%rbp
add %rax,%rbp

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r3 += c
# asm 1: add <c=int64#12,<r3=int64#15
# asm 2: add <c=%r14,<r3=%rbp
add %r14,%rbp

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)

# qhasm: carry? r4 += rax
# asm 1: add <rax=int64#7,<r4=int64#5
# asm 2: add <rax=%rax,<r4=%r8
add %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r4 += c
# asm 1: add <c=int64#12,<r4=int64#5
# asm 2: add <c=%r14,<r4=%r8
add %r14,%r8

# qhasm: r5 += rdx + carry
# asm 1: adc <rdx=int64#3,<r5=int64#6
# asm 2: adc <rdx=%rdx,<r5=%r9
adc %rdx,%r9
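# Row x[1] complete: r0..r5 now accumulate x[0]*y + 2^64 * (x[1]*y).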
# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)

# qhasm: carry? r2 += rax
# asm 1: add <rax=int64#7,<r2=int64#14
# asm 2: add <rax=%rax,<r2=%rbx
add %rax,%rbx

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)

# qhasm: carry? r3 += rax
# asm 1: add <rax=int64#7,<r3=int64#15
# asm 2: add <rax=%rax,<r3=%rbp
add %rax,%rbp

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r3 += c
# asm 1: add <c=int64#12,<r3=int64#15
# asm 2: add <c=%r14,<r3=%rbp
add %r14,%rbp

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)

# qhasm: carry? r4 += rax
# asm 1: add <rax=int64#7,<r4=int64#5
# asm 2: add <rax=%rax,<r4=%r8
add %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r4 += c
# asm 1: add <c=int64#12,<r4=int64#5
# asm 2: add <c=%r14,<r4=%r8
add %r14,%r8

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)

# qhasm: carry? r5 += rax
# asm 1: add <rax=int64#7,<r5=int64#6
# asm 2: add <rax=%rax,<r5=%r9
add %rax,%r9

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r5 += c
# asm 1: add <c=int64#12,<r5=int64#6
# asm 2: add <c=%r14,<r5=%r9
add %r14,%r9

# qhasm: r6 += rdx + carry
# asm 1: adc <rdx=int64#3,<r6=int64#8
# asm 2: adc <rdx=%rdx,<r6=%r10
adc %rdx,%r10
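# Row x[2] complete: r0..r6 accumulated.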
# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)

# qhasm: carry? r3 += rax
# asm 1: add <rax=int64#7,<r3=int64#15
# asm 2: add <rax=%rax,<r3=%rbp
add %rax,%rbp

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)

# qhasm: carry? r4 += rax
# asm 1: add <rax=int64#7,<r4=int64#5
# asm 2: add <rax=%rax,<r4=%r8
add %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r4 += c
# asm 1: add <c=int64#12,<r4=int64#5
# asm 2: add <c=%r14,<r4=%r8
add %r14,%r8

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)

# qhasm: carry? r5 += rax
# asm 1: add <rax=int64#7,<r5=int64#6
# asm 2: add <rax=%rax,<r5=%r9
add %rax,%r9

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r5 += c
# asm 1: add <c=int64#12,<r5=int64#6
# asm 2: add <c=%r14,<r5=%r9
add %r14,%r9

# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)

# qhasm: carry? r6 += rax
# asm 1: add <rax=int64#7,<r6=int64#8
# asm 2: add <rax=%rax,<r6=%r10
add %rax,%r10

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r6 += c
# asm 1: add <c=int64#12,<r6=int64#8
# asm 2: add <c=%r14,<r6=%r10
add %r14,%r10

# qhasm: r7 += rdx + carry
# asm 1: adc <rdx=int64#3,<r7=int64#9
# asm 2: adc <rdx=%rdx,<r7=%r11
adc %rdx,%r11
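# Row x[3] complete: r0..r7 now hold the full 512-bit product x * y.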
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#11,0(<rp=int64#1)
# asm 2: movq <r0=%r13,0(<rp=%rdi)
movq %r13,0(%rdi)

# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#13,8(<rp=int64#1)
# asm 2: movq <r1=%r15,8(<rp=%rdi)
movq %r15,8(%rdi)

# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#14,16(<rp=int64#1)
# asm 2: movq <r2=%rbx,16(<rp=%rdi)
movq %rbx,16(%rdi)

# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#15,24(<rp=int64#1)
# asm 2: movq <r3=%rbp,24(<rp=%rdi)
movq %rbp,24(%rdi)

# qhasm: *(uint64 *)(rp + 32) = r4
# asm 1: movq <r4=int64#5,32(<rp=int64#1)
# asm 2: movq <r4=%r8,32(<rp=%rdi)
movq %r8,32(%rdi)

# qhasm: *(uint64 *)(rp + 40) = r5
# asm 1: movq <r5=int64#6,40(<rp=int64#1)
# asm 2: movq <r5=%r9,40(<rp=%rdi)
movq %r9,40(%rdi)

# qhasm: *(uint64 *)(rp + 48) = r6
# asm 1: movq <r6=int64#8,48(<rp=int64#1)
# asm 2: movq <r6=%r10,48(<rp=%rdi)
movq %r10,48(%rdi)

# qhasm: *(uint64 *)(rp + 56) = r7
# asm 1: movq <r7=int64#9,56(<rp=int64#1)
# asm 2: movq <r7=%r11,56(<rp=%rdi)
movq %r11,56(%rdi)
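# All eight result limbs stored, least significant first, at byte
# offsets 0..56 from rp.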
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
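# A caller would use a prototype along these lines (assumed from the
# argument layout above; not declared in this file):
#   extern void crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul(
#       unsigned long long rp[8],
#       const unsigned long long xp[4],
#       const unsigned long long yp[4]);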