.file "sha256.c"
# GNU C11 (GCC) version 7.3.0 (x86_64-nyan-linux-gnu)
# compiled by GNU C version 7.3.0, GMP version 6.1.2, MPFR version 4.0.1, MPC version 1.1.0, isl version none
# GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072
# options passed: -I ../include
# -I /root/wip/nyanglibc/builds/0/nyanglibc/build/crypt
# -I /root/wip/nyanglibc/builds/0/nyanglibc/build
# -I ../sysdeps/unix/sysv/linux/x86_64/64
# -I ../sysdeps/unix/sysv/linux/x86_64
# -I ../sysdeps/unix/sysv/linux/x86/include
# -I ../sysdeps/unix/sysv/linux/x86 -I ../sysdeps/x86/nptl
# -I ../sysdeps/unix/sysv/linux/wordsize-64 -I ../sysdeps/x86_64/nptl
# -I ../sysdeps/unix/sysv/linux/include -I ../sysdeps/unix/sysv/linux
# -I ../sysdeps/nptl -I ../sysdeps/pthread -I ../sysdeps/gnu
# -I ../sysdeps/unix/inet -I ../sysdeps/unix/sysv -I ../sysdeps/unix/x86_64
# -I ../sysdeps/unix -I ../sysdeps/posix -I ../sysdeps/x86_64/64
# -I ../sysdeps/x86_64/fpu -I ../sysdeps/x86/fpu -I ../sysdeps/x86_64
# -I ../sysdeps/x86/include -I ../sysdeps/x86
# -I ../sysdeps/ieee754/float128 -I ../sysdeps/ieee754/ldbl-96/include
# -I ../sysdeps/ieee754/ldbl-96 -I ../sysdeps/ieee754/dbl-64
# -I ../sysdeps/ieee754/flt-32 -I ../sysdeps/wordsize-64
# -I ../sysdeps/ieee754 -I ../sysdeps/generic -I .. -I ../libio -I .
# -MD /run/asm/crypt/sha256.v.d -MF /run/asm/crypt/sha256.o.dt -MP
# -MT /run/asm/crypt/.o -D _LIBC_REENTRANT -D MODULE_NAME=libcrypt -D PIC
# -D TOP_NAMESPACE=glibc
# -include /root/wip/nyanglibc/builds/0/nyanglibc/build/libc-modules.h
# -include ../include/libc-symbols.h sha256.c -mtune=generic -march=x86-64
# -auxbase-strip /run/asm/crypt/sha256.v.s -O2 -Wall -Wwrite-strings
# -Wundef -Werror -Wstrict-prototypes -Wold-style-definition -std=gnu11
# -fverbose-asm -fgnu89-inline -fmerge-all-constants -frounding-math
# -fno-stack-protector -fmath-errno -fpie -ftls-model=initial-exec
# options enabled: -faggressive-loop-optimizations -falign-labels
# -fasynchronous-unwind-tables -fauto-inc-dec -fbranch-count-reg
# -fcaller-saves -fchkp-check-incomplete-type -fchkp-check-read
# -fchkp-check-write -fchkp-instrument-calls -fchkp-narrow-bounds
# -fchkp-optimize -fchkp-store-bounds -fchkp-use-static-bounds
# -fchkp-use-static-const-bounds -fchkp-use-wrappers -fcode-hoisting
# -fcombine-stack-adjustments -fcommon -fcompare-elim -fcprop-registers
# -fcrossjumping -fcse-follow-jumps -fdefer-pop
# -fdelete-null-pointer-checks -fdevirtualize -fdevirtualize-speculatively
# -fdwarf2-cfi-asm -fearly-inlining -feliminate-unused-debug-types
# -fexpensive-optimizations -fforward-propagate -ffp-int-builtin-inexact
# -ffunction-cse -fgcse -fgcse-lm -fgnu-runtime -fgnu-unique
# -fguess-branch-probability -fhoist-adjacent-loads -fident -fif-conversion
# -fif-conversion2 -findirect-inlining -finline -finline-atomics
# -finline-functions-called-once -finline-small-functions -fipa-bit-cp
# -fipa-cp -fipa-icf -fipa-icf-functions -fipa-icf-variables -fipa-profile
# -fipa-pure-const -fipa-ra -fipa-reference -fipa-sra -fipa-vrp
# -fira-hoist-pressure -fira-share-save-slots -fira-share-spill-slots
# -fisolate-erroneous-paths-dereference -fivopts -fkeep-static-consts
# -fleading-underscore -flifetime-dse -flra-remat -flto-odr-type-merging
# -fmath-errno -fmerge-all-constants -fmerge-debug-strings
# -fmove-loop-invariants -fomit-frame-pointer -foptimize-sibling-calls
# -foptimize-strlen -fpartial-inlining -fpeephole -fpeephole2 -fpic -fpie
# -fplt -fprefetch-loop-arrays -free -freg-struct-return -freorder-blocks
# -freorder-functions -frerun-cse-after-loop -frounding-math
# -fsched-critical-path-heuristic -fsched-dep-count-heuristic
# -fsched-group-heuristic -fsched-interblock -fsched-last-insn-heuristic
# -fsched-rank-heuristic -fsched-spec -fsched-spec-insn-heuristic
# -fsched-stalled-insns-dep -fschedule-fusion -fschedule-insns2
# -fsemantic-interposition -fshow-column -fshrink-wrap
# -fshrink-wrap-separate -fsigned-zeros -fsplit-ivs-in-unroller
# -fsplit-wide-types -fssa-backprop -fssa-phiopt -fstdarg-opt
# -fstore-merging -fstrict-aliasing -fstrict-overflow
# -fstrict-volatile-bitfields -fsync-libcalls -fthread-jumps
# -ftoplevel-reorder -ftrapping-math -ftree-bit-ccp -ftree-builtin-call-dce
# -ftree-ccp -ftree-ch -ftree-coalesce-vars -ftree-copy-prop -ftree-cselim
# -ftree-dce -ftree-dominator-opts -ftree-dse -ftree-forwprop -ftree-fre
# -ftree-loop-if-convert -ftree-loop-im -ftree-loop-ivcanon
# -ftree-loop-optimize -ftree-parallelize-loops= -ftree-phiprop -ftree-pre
# -ftree-pta -ftree-reassoc -ftree-scev-cprop -ftree-sink -ftree-slsr
# -ftree-sra -ftree-switch-conversion -ftree-tail-merge -ftree-ter
# -ftree-vrp -funit-at-a-time -funwind-tables -fverbose-asm
# -fzero-initialized-in-bss -m128bit-long-double -m64 -m80387
# -malign-stringops -mavx256-split-unaligned-load
# -mavx256-split-unaligned-store -mfancy-math-387 -mfp-ret-in-387 -mfxsr
# -mglibc -mieee-fp -mlong-double-80 -mmmx -mno-sse4 -mpush-args -mred-zone
# -msse -msse2 -mstv -mtls-direct-seg-refs -mvzeroupper
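# Note: this file is the GCC 7.3.0 -fverbose-asm output for crypt/sha256.c
# (see the header above).  It defines the four exported SHA-256 primitives
# __sha256_init_ctx, __sha256_process_block, __sha256_finish_ctx and
# __sha256_process_bytes, plus the round-constant table K and the padding
# block fillbuf in .rodata at the end of the file.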
.text
.p2align 4,,15
.globl __sha256_init_ctx
.type __sha256_init_ctx, @function
__sha256_init_ctx:
.LFB32:
.cfi_startproc
# sha256.c:91: ctx->H[0] = 0x6a09e667;
movabsq $-4942790177982912921, %rax #, tmp92
# sha256.c:98: ctx->H[7] = 0x5be0cd19;
movq $0, 32(%rdi) #, MEM[(void *)ctx_2(D) + 32B]
# sha256.c:100: ctx->total64 = 0;
movl $0, 40(%rdi) #, MEM[(void *)ctx_2(D) + 40B]
# sha256.c:91: ctx->H[0] = 0x6a09e667;
movq %rax, (%rdi) # tmp92, MEM[(unsigned int *)ctx_2(D)]
# sha256.c:92: ctx->H[1] = 0xbb67ae85;
movabsq $-6534734903820487822, %rax #, tmp93
movq %rax, 8(%rdi) # tmp93, MEM[(unsigned int *)ctx_2(D) + 8B]
# sha256.c:94: ctx->H[3] = 0xa54ff53a;
movabsq $-7276294671082564993, %rax #, tmp94
movq %rax, 16(%rdi) # tmp94, MEM[(unsigned int *)ctx_2(D) + 16B]
# sha256.c:96: ctx->H[5] = 0x9b05688c;
movabsq $6620516960021240235, %rax #, tmp95
movq %rax, 24(%rdi) # tmp95, MEM[(void *)ctx_2(D) + 24B]
# sha256.c:102: }
ret
.cfi_endproc
.LFE32:
.size __sha256_init_ctx, .-__sha256_init_ctx
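# The routine above initializes the context: the four movabsq/movq pairs
# store the eight 32-bit SHA-256 initial hash values H[0..7] as packed
# little-endian 64-bit pairs (e.g. H[0]=0x6a09e667 and H[1]=0xbb67ae85 in a
# single store), and the two zero stores at offsets 32 and 40 clear total64
# and buflen.  A hedged C-level sketch, reconstructed from the source-line
# comments (field names follow those comments and need not match the real
# struct sha256_ctx layout exactly):
#
#   void __sha256_init_ctx (struct sha256_ctx *ctx)
#   {
#     ctx->H[0] = 0x6a09e667; ctx->H[1] = 0xbb67ae85;
#     ctx->H[2] = 0x3c6ef372; ctx->H[3] = 0xa54ff53a;
#     ctx->H[4] = 0x510e527f; ctx->H[5] = 0x9b05688c;
#     ctx->H[6] = 0x1f83d9ab; ctx->H[7] = 0x5be0cd19;
#     ctx->total64 = 0;
#     ctx->buflen = 0;
#   }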
.p2align 4,,15
.globl __sha256_process_block
.type __sha256_process_block, @function
__sha256_process_block:
.LFB35:
.cfi_startproc
pushq %r15 #
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14 #
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13 #
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12 #
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp #
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx #
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
# ./sha256-block.c:9: size_t nwords = len / sizeof (uint32_t);
movq %rsi, %rbx # len, nwords
shrq $2, %rbx #, nwords
# ./sha256-block.c:7: {
subq $208, %rsp #,
.cfi_def_cfa_offset 264
# ./sha256-block.c:22: ctx->total64 += len;
addq %rsi, 32(%rdx) # len, ctx_80(D)->D.4694.total64
# ./sha256-block.c:26: while (nwords > 0)
testq %rbx, %rbx # nwords
# ./sha256-block.c:7: {
movq %rdi, -88(%rsp) # buffer, %sfp
# ./sha256-block.c:10: uint32_t a = ctx->H[0];
movl (%rdx), %edi # ctx_80(D)->H, a
# ./sha256-block.c:7: {
movq %rdx, -64(%rsp) # ctx, %sfp
# ./sha256-block.c:9: size_t nwords = len / sizeof (uint32_t);
movq %rbx, -80(%rsp) # nwords, %sfp
# ./sha256-block.c:10: uint32_t a = ctx->H[0];
movl %edi, -108(%rsp) # a, %sfp
# ./sha256-block.c:11: uint32_t b = ctx->H[1];
movl 4(%rdx), %edi # ctx_80(D)->H, b
movl %edi, -104(%rsp) # b, %sfp
# ./sha256-block.c:12: uint32_t c = ctx->H[2];
movl 8(%rdx), %edi # ctx_80(D)->H, c
movl %edi, -100(%rsp) # c, %sfp
# ./sha256-block.c:13: uint32_t d = ctx->H[3];
movl 12(%rdx), %edi # ctx_80(D)->H, d
movl %edi, -112(%rsp) # d, %sfp
# ./sha256-block.c:14: uint32_t e = ctx->H[4];
movl 16(%rdx), %edi # ctx_80(D)->H, e
movl %edi, -96(%rsp) # e, %sfp
# ./sha256-block.c:15: uint32_t f = ctx->H[5];
movl 20(%rdx), %edi # ctx_80(D)->H, f
movl %edi, -116(%rsp) # f, %sfp
# ./sha256-block.c:16: uint32_t g = ctx->H[6];
movl 24(%rdx), %edi # ctx_80(D)->H, g
movl %edi, -92(%rsp) # g, %sfp
# ./sha256-block.c:17: uint32_t h = ctx->H[7];
movl 28(%rdx), %edi # ctx_80(D)->H, h
movl %edi, -120(%rsp) # h, %sfp
# ./sha256-block.c:26: while (nwords > 0)
je .L4 #,
leaq -56(%rsp), %r14 #, tmp241
leaq K(%rip), %r15 #, tmp242
leaq 192(%r14), %rax #, _214
movq %rax, -72(%rsp) # _214, %sfp
.p2align 4,,10
.p2align 3
.L9:
# ./sha256-block.c:7: {
movq -88(%rsp), %rcx # %sfp, buffer
xorl %eax, %eax # ivtmp.43
.p2align 4,,10
.p2align 3
.L5:
# ../bits/byteswap.h:52: return __builtin_bswap32 (__bsx);
movl (%rcx,%rax), %edx # MEM[base: words_139, index: ivtmp.43_213, offset: 0B], MEM[base: words_139, index: ivtmp.43_213, offset: 0B]
bswap %edx # _129
# ./sha256-block.c:53: W[t] = SWAP (*words);
movl %edx, (%r14,%rax) # _129, MEM[symbol: W, index: ivtmp.43_213, offset: 0B]
addq $4, %rax #, ivtmp.43
# ./sha256-block.c:51: for (unsigned int t = 0; t < 16; ++t)
cmpq $64, %rax #, ivtmp.43
jne .L5 #,
addq $64, -88(%rsp) #, %sfp
movq -72(%rsp), %rdi # %sfp, _214
movq %r14, %rsi # tmp241, ivtmp.39
.p2align 4,,10
.p2align 3
.L6:
# ./sha256-block.c:57: W[t] = R1 (W[t - 2]) + W[t - 7] + R0 (W[t - 15]) + W[t - 16];
movl 56(%rsi), %eax # MEM[base: _217, offset: 56B], _5
movl 4(%rsi), %ecx # MEM[base: _217, offset: 4B], _18
addq $4, %rsi #, ivtmp.39
movl %eax, %edx # _5, tmp202
movl %eax, %r8d # _5, tmp203
shrl $10, %eax #, tmp205
roll $13, %r8d #, tmp203
roll $15, %edx #, tmp202
xorl %r8d, %edx # tmp203, tmp204
movl %ecx, %r8d # _18, tmp212
xorl %eax, %edx # tmp205, tmp206
movl -4(%rsi), %eax # MEM[base: _217, offset: 0B], MEM[base: _217, offset: 0B]
addl 32(%rsi), %eax # MEM[base: _217, offset: 36B], tmp207
roll $14, %r8d #, tmp212
addl %eax, %edx # tmp207, tmp210
movl %ecx, %eax # _18, tmp211
shrl $3, %ecx #, tmp214
rorl $7, %eax #, tmp211
xorl %r8d, %eax # tmp212, tmp213
xorl %ecx, %eax # tmp214, tmp215
addl %edx, %eax # tmp210, tmp216
movl %eax, 60(%rsi) # tmp216, MEM[base: _217, offset: 64B]
# ./sha256-block.c:56: for (unsigned int t = 16; t < 64; ++t)
cmpq %rsi, %rdi # ivtmp.39, _214
jne .L6 #,
movl -120(%rsp), %eax # %sfp, h
movl -92(%rsp), %ebx # %sfp, g
xorl %r9d, %r9d # ivtmp.29
movl -116(%rsp), %ebp # %sfp, f
movl -96(%rsp), %edi # %sfp, e
movl $1116352408, %r12d #, pretmp_255
movl -112(%rsp), %r13d # %sfp, d
movl -100(%rsp), %r10d # %sfp, c
movl -104(%rsp), %r11d # %sfp, b
movl -108(%rsp), %r8d # %sfp, a
movl %eax, %esi # h, h
jmp .L8 #
.p2align 4,,10
.p2align 3
.L17:
movl (%r15,%r9), %r12d # MEM[symbol: K, index: ivtmp.29_224, offset: 0B], pretmp_255
movl %r10d, %r13d # c, d
movl %ebx, %esi # g, h
# ./sha256-block.c:67: e = d + T1;
movl %r11d, %r10d # b, c
movl %ebp, %ebx # f, g
movl %r8d, %r11d # a, b
movl %edi, %ebp # e, f
# ./sha256-block.c:71: a = T1 + T2;
movl %eax, %r8d # a, a
# ./sha256-block.c:67: e = d + T1;
movl %ecx, %edi # e, e
.L8:
# ./sha256-block.c:62: uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
movl %edi, %eax # e, tmp217
movl %edi, %edx # e, tmp218
movl %edi, %ecx # e, tmp224
rorl $11, %edx #, tmp218
rorl $6, %eax #, tmp217
andl %ebp, %ecx # f, tmp224
xorl %edx, %eax # tmp218, tmp219
movl %edi, %edx # e, tmp220
roll $7, %edx #, tmp220
xorl %eax, %edx # tmp219, tmp221
movl %edi, %eax # e, tmp222
notl %eax # tmp222
andl %ebx, %eax # g, tmp223
xorl %ecx, %eax # tmp224, tmp225
# ./sha256-block.c:63: uint32_t T2 = S0 (a) + Maj (a, b, c);
movl %r8d, %ecx # a, tmp232
# ./sha256-block.c:62: uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
addl %edx, %eax # tmp221, tmp226
# ./sha256-block.c:63: uint32_t T2 = S0 (a) + Maj (a, b, c);
movl %r8d, %edx # a, tmp231
# ./sha256-block.c:62: uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
addl (%r14,%r9), %eax # MEM[symbol: W, index: ivtmp.29_225, offset: 0B], tmp228
# ./sha256-block.c:63: uint32_t T2 = S0 (a) + Maj (a, b, c);
rorl $13, %ecx #, tmp232
rorl $2, %edx #, tmp231
addq $4, %r9 #, ivtmp.29
xorl %ecx, %edx # tmp232, tmp233
movl %r8d, %ecx # a, tmp234
roll $10, %ecx #, tmp234
xorl %edx, %ecx # tmp233, tmp235
movl %r11d, %edx # b, tmp236
# ./sha256-block.c:62: uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
addl %esi, %eax # h, tmp230
# ./sha256-block.c:63: uint32_t T2 = S0 (a) + Maj (a, b, c);
xorl %r10d, %edx # c, tmp236
movl %r11d, %esi # b, tmp238
# ./sha256-block.c:62: uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
addl %r12d, %eax # pretmp_255, T1
# ./sha256-block.c:63: uint32_t T2 = S0 (a) + Maj (a, b, c);
andl %r8d, %edx # a, tmp237
andl %r10d, %esi # c, tmp238
xorl %esi, %edx # tmp238, tmp239
addl %ecx, %edx # tmp235, T2
# ./sha256-block.c:67: e = d + T1;
leal (%rax,%r13), %ecx #, e
# ./sha256-block.c:71: a = T1 + T2;
addl %edx, %eax # T2, a
# ./sha256-block.c:60: for (unsigned int t = 0; t < 64; ++t)
cmpq $256, %r9 #, ivtmp.29
jne .L17 #,
# ./sha256-block.c:76: a += a_save;
addl %eax, -108(%rsp) # a, %sfp
# ./sha256-block.c:77: b += b_save;
addl %r8d, -104(%rsp) # a, %sfp
# ./sha256-block.c:78: c += c_save;
addl %r11d, -100(%rsp) # b, %sfp
# ./sha256-block.c:79: d += d_save;
addl %r10d, -112(%rsp) # c, %sfp
# ./sha256-block.c:80: e += e_save;
addl %ecx, -96(%rsp) # e, %sfp
# ./sha256-block.c:81: f += f_save;
addl %edi, -116(%rsp) # e, %sfp
# ./sha256-block.c:82: g += g_save;
addl %ebp, -92(%rsp) # f, %sfp
# ./sha256-block.c:83: h += h_save;
addl %ebx, -120(%rsp) # g, %sfp
# ./sha256-block.c:26: while (nwords > 0)
subq $16, -80(%rsp) #, %sfp
jne .L9 #,
.L4:
# ./sha256-block.c:90: ctx->H[0] = a;
movq -64(%rsp), %rax # %sfp, ctx
movl -108(%rsp), %ebx # %sfp, a
movl %ebx, (%rax) # a, ctx_80(D)->H
# ./sha256-block.c:91: ctx->H[1] = b;
movl -104(%rsp), %ebx # %sfp, b
movl %ebx, 4(%rax) # b, ctx_80(D)->H
# ./sha256-block.c:92: ctx->H[2] = c;
movl -100(%rsp), %ebx # %sfp, c
movl %ebx, 8(%rax) # c, ctx_80(D)->H
# ./sha256-block.c:93: ctx->H[3] = d;
movl -112(%rsp), %ebx # %sfp, d
movl %ebx, 12(%rax) # d, ctx_80(D)->H
# ./sha256-block.c:94: ctx->H[4] = e;
movl -96(%rsp), %ebx # %sfp, e
movl %ebx, 16(%rax) # e, ctx_80(D)->H
# ./sha256-block.c:95: ctx->H[5] = f;
movl -116(%rsp), %ebx # %sfp, f
movl %ebx, 20(%rax) # f, ctx_80(D)->H
# ./sha256-block.c:96: ctx->H[6] = g;
movl -92(%rsp), %ebx # %sfp, g
movl %ebx, 24(%rax) # g, ctx_80(D)->H
# ./sha256-block.c:97: ctx->H[7] = h;
movl -120(%rsp), %ebx # %sfp, h
movl %ebx, 28(%rax) # h, ctx_80(D)->H
# ./sha256-block.c:98: }
addq $208, %rsp #,
.cfi_def_cfa_offset 56
popq %rbx #
.cfi_def_cfa_offset 48
popq %rbp #
.cfi_def_cfa_offset 40
popq %r12 #
.cfi_def_cfa_offset 32
popq %r13 #
.cfi_def_cfa_offset 24
popq %r14 #
.cfi_def_cfa_offset 16
popq %r15 #
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE35:
.size __sha256_process_block, .-__sha256_process_block
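# The function above is the SHA-256 compression loop from sha256-block.c:
# the first inner loop byte-swaps the 16 input words into W[0..15], the
# second extends the message schedule to W[16..63], and the 64-iteration
# round loop keeps the working variables a..h in registers.  A hedged
# reconstruction of the round step, consistent with the rotate/shift
# counts used above:
#
#   W[t] = R1 (W[t - 2]) + W[t - 7] + R0 (W[t - 15]) + W[t - 16];  /* t = 16..63 */
#   T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
#   T2 = S0 (a) + Maj (a, b, c);
#   h = g; g = f; f = e; e = d + T1;
#   d = c; c = b; b = a; a = T1 + T2;
#
# where S1(x)=ror(x,6)^ror(x,11)^ror(x,25), S0(x)=ror(x,2)^ror(x,13)^ror(x,22),
# R0(x)=ror(x,7)^ror(x,18)^(x>>3), R1(x)=ror(x,17)^ror(x,19)^(x>>10),
# Ch(e,f,g)=(e&f)^(~e&g) and Maj(a,b,c)=(a&b)^(a&c)^(b&c).  After each
# 16-word block the working variables are added back into the saved state,
# and the final values are written to ctx->H[0..7].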
.p2align 4,,15
.globl __sha256_finish_ctx
.type __sha256_finish_ctx, @function
__sha256_finish_ctx:
.LFB33:
.cfi_startproc
pushq %r13 #
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12 #
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp #
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx #
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
movq %rsi, %rbp # resbuf, resbuf
movq %rdi, %rbx # ctx, ctx
subq $8, %rsp #,
.cfi_def_cfa_offset 48
# sha256.c:114: uint32_t bytes = ctx->buflen;
movl 40(%rdi), %r12d # ctx_21(D)->buflen,
# sha256.c:118: ctx->total64 += bytes;
addq %r12, 32(%rdi) # _2, ctx_21(D)->D.4694.total64
# sha256.c:120: pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
cmpl $55, %r12d #, bytes
jbe .L19 #,
# sha256.c:120: pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
movl $120, %r13d #, tmp114
subl %r12d, %r13d # bytes, iftmp.0_18
.L20:
# sha256.c:121: memcpy (&ctx->buffer[bytes], fillbuf, pad);
leaq 48(%rbx,%r12), %rdi #, tmp118
leaq fillbuf(%rip), %rsi #,
movq %r13, %rdx # iftmp.0_18,
# sha256.c:125: ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
addq %r13, %r12 # iftmp.0_18, _9
# sha256.c:121: memcpy (&ctx->buffer[bytes], fillbuf, pad);
call memcpy@PLT #
# sha256.c:125: ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
movq 32(%rbx), %rax # ctx_21(D)->D.4694.total64, tmp137
movq %r12, %rdx # _9, tmp127
# sha256.c:133: __sha256_process_block (ctx->buffer, bytes + pad + 8, ctx);
leaq 8(%r12), %rsi #, tmp129
# sha256.c:125: ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
shrq $3, %rdx #, tmp127
# sha256.c:133: __sha256_process_block (ctx->buffer, bytes + pad + 8, ctx);
leaq 48(%rbx), %rdi #, tmp130
# sha256.c:125: ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
salq $3, %rax #, tmp125
# ../bits/byteswap.h:73: return __builtin_bswap64 (__bsx);
bswap %rax # _31
# sha256.c:125: ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
movq %rax, 48(%rbx,%rdx,8) # _31, ctx_21(D)->D.4700.buffer64
# sha256.c:133: __sha256_process_block (ctx->buffer, bytes + pad + 8, ctx);
movq %rbx, %rdx # ctx,
call __sha256_process_block #
xorl %eax, %eax # ivtmp.68
.p2align 4,,10
.p2align 3
.L21:
# ../bits/byteswap.h:52: return __builtin_bswap32 (__bsx);
movl (%rbx,%rax), %edx # MEM[base: ctx_21(D), index: ivtmp.68_36, offset: 0B], MEM[base: ctx_21(D), index: ivtmp.68_36, offset: 0B]
bswap %edx # _27
# sha256.c:137: ((uint32_t *) resbuf)[i] = SWAP (ctx->H[i]);
movl %edx, 0(%rbp,%rax) # _27, MEM[base: resbuf_30(D), index: ivtmp.68_36, offset: 0B]
addq $4, %rax #, ivtmp.68
# sha256.c:136: for (unsigned int i = 0; i < 8; ++i)
cmpq $32, %rax #, ivtmp.68
jne .L21 #,
# sha256.c:140: }
addq $8, %rsp #,
.cfi_remember_state
.cfi_def_cfa_offset 40
movq %rbp, %rax # resbuf,
popq %rbx #
.cfi_def_cfa_offset 32
popq %rbp #
.cfi_def_cfa_offset 24
popq %r12 #
.cfi_def_cfa_offset 16
popq %r13 #
.cfi_def_cfa_offset 8
ret
.p2align 4,,10
.p2align 3
.L19:
.cfi_restore_state
# sha256.c:120: pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
movl $56, %r13d #, tmp116
subl %r12d, %r13d # bytes, iftmp.0_18
jmp .L20 #
.cfi_endproc
.LFE33:
.size __sha256_finish_ctx, .-__sha256_finish_ctx
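# __sha256_finish_ctx above pads and finalizes the hash.  Gathering the
# quoted sha256.c lines into one hedged sketch:
#
#   uint32_t bytes = ctx->buflen;
#   ctx->total64 += bytes;
#   pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
#   memcpy (&ctx->buffer[bytes], fillbuf, pad);
#   ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
#   __sha256_process_block (ctx->buffer, bytes + pad + 8, ctx);
#   for (unsigned int i = 0; i < 8; ++i)
#     ((uint32_t *) resbuf)[i] = SWAP (ctx->H[i]);
#
# i.e. append the 0x80 padding from fillbuf, store the big-endian bit count
# at the end of the (possibly two-block) buffer, process it, then write the
# digest to resbuf, which is returned in %rax.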
.p2align 4,,15
.globl __sha256_process_bytes
.type __sha256_process_bytes, @function
__sha256_process_bytes:
.LFB34:
.cfi_startproc
pushq %r15 #
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14 #
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13 #
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12 #
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
movq %rdi, %r12 # buffer, buffer
pushq %rbp #
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx #
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
movq %rdx, %rbp # ctx, ctx
movq %rsi, %rbx # len, len
subq $8, %rsp #,
.cfi_def_cfa_offset 64
# sha256.c:148: if (ctx->buflen != 0)
movl 40(%rdx), %eax # ctx_32(D)->buflen, _1
testl %eax, %eax # _1
jne .L63 #,
.L25:
# sha256.c:171: if (len >= 64)
cmpq $63, %rbx #, len
ja .L64 #,
.L33:
# sha256.c:198: if (len > 0)
testq %rbx, %rbx # len
jne .L65 #,
# sha256.c:212: }
addq $8, %rsp #,
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx #
.cfi_def_cfa_offset 48
popq %rbp #
.cfi_def_cfa_offset 40
popq %r12 #
.cfi_def_cfa_offset 32
popq %r13 #
.cfi_def_cfa_offset 24
popq %r14 #
.cfi_def_cfa_offset 16
popq %r15 #
.cfi_def_cfa_offset 8
ret
.p2align 4,,10
.p2align 3
.L65:
.cfi_restore_state
# sha256.c:200: size_t left_over = ctx->buflen;
movl 40(%rbp), %esi # ctx_32(D)->buflen, left_over
# sha256.c:202: memcpy (&ctx->buffer[left_over], buffer, len);
cmpl $8, %ebx #, len
movl %ebx, %eax # len, len
leaq 48(%rbp,%rsi), %rcx #, tmp172
jnb .L35 #,
testb $4, %bl #, len
jne .L66 #,
testl %ebx, %ebx # len
je .L36 #,
movzbl (%r12), %edx #* buffer, tmp185
testb $2, %al #, len
movb %dl, (%rcx) # tmp185,
jne .L67 #,
.L36:
# sha256.c:203: left_over += len;
addq %rsi, %rbx # left_over, left_over
# sha256.c:204: if (left_over >= 64)
cmpq $63, %rbx #, left_over
jbe .L41 #,
# sha256.c:206: __sha256_process_block (ctx->buffer, 64, ctx);
leaq 48(%rbp), %r12 #, _18
movq %rbp, %rdx # ctx,
movl $64, %esi #,
# sha256.c:207: left_over -= 64;
subq $64, %rbx #, left_over
# sha256.c:206: __sha256_process_block (ctx->buffer, 64, ctx);
movq %r12, %rdi # _18,
call __sha256_process_block #
# sha256.c:208: memcpy (ctx->buffer, &ctx->buffer[64], left_over);
leaq 112(%rbp), %rsi #, tmp208
movq %rbx, %rdx # left_over,
movq %r12, %rdi # _18,
call memcpy@PLT #
.L41:
# sha256.c:210: ctx->buflen = left_over;
movl %ebx, 40(%rbp) # left_over, ctx_32(D)->buflen
# sha256.c:212: }
addq $8, %rsp #,
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx #
.cfi_def_cfa_offset 48
popq %rbp #
.cfi_def_cfa_offset 40
popq %r12 #
.cfi_def_cfa_offset 32
popq %r13 #
.cfi_def_cfa_offset 24
popq %r14 #
.cfi_def_cfa_offset 16
popq %r15 #
.cfi_def_cfa_offset 8
ret
.p2align 4,,10
.p2align 3
.L64:
.cfi_restore_state
# sha256.c:191: __sha256_process_block (buffer, len & ~63, ctx);
movq %rbx, %r13 # len, _15
movq %r12, %rdi # buffer,
movq %rbp, %rdx # ctx,
andq $-64, %r13 #, _15
# sha256.c:193: len &= 63;
andl $63, %ebx #, len
# sha256.c:191: __sha256_process_block (buffer, len & ~63, ctx);
movq %r13, %rsi # _15,
# sha256.c:192: buffer = (const char *) buffer + (len & ~63);
addq %r13, %r12 # _15, buffer
# sha256.c:191: __sha256_process_block (buffer, len & ~63, ctx);
call __sha256_process_block #
jmp .L33 #
.p2align 4,,10
.p2align 3
.L63:
# sha256.c:150: size_t left_over = ctx->buflen;
movl %eax, %r13d # _1, left_over
# sha256.c:151: size_t add = 128 - left_over > len ? len : 128 - left_over;
movl $128, %edx #, tmp119
subq %r13, %rdx # left_over, tmp118
# sha256.c:153: memcpy (&ctx->buffer[left_over], buffer, add);
leaq 48(%rbp,%r13), %rdi #, tmp121
# sha256.c:151: size_t add = 128 - left_over > len ? len : 128 - left_over;
cmpq %rsi, %rdx # len, tmp118
cmova %rsi, %rdx # tmp118,, len, tmp118
# sha256.c:153: memcpy (&ctx->buffer[left_over], buffer, add);
movq %r12, %rsi # buffer,
# sha256.c:151: size_t add = 128 - left_over > len ? len : 128 - left_over;
movq %rdx, %r14 # tmp118, add
# sha256.c:153: memcpy (&ctx->buffer[left_over], buffer, add);
call memcpy@PLT #
# sha256.c:154: ctx->buflen += add;
movl 40(%rbp), %esi # ctx_32(D)->buflen, _6
addl %r14d, %esi # add, _6
# sha256.c:156: if (ctx->buflen > 64)
cmpl $64, %esi #, _6
# sha256.c:154: ctx->buflen += add;
movl %esi, 40(%rbp) # _6, ctx_32(D)->buflen
# sha256.c:156: if (ctx->buflen > 64)
ja .L68 #,
.L26:
# sha256.c:166: buffer = (const char *) buffer + add;
addq %r14, %r12 # add, buffer
# sha256.c:167: len -= add;
subq %r14, %rbx # add, len
jmp .L25 #
.p2align 4,,10
.p2align 3
.L35:
# sha256.c:202: memcpy (&ctx->buffer[left_over], buffer, len);
movq (%r12), %rax #* buffer, tmp194
movq %rax, (%rcx) # tmp194,
movl %ebx, %eax # len, len
movq -8(%r12,%rax), %rdx #, tmp201
movq %rdx, -8(%rcx,%rax) # tmp201,
leaq 8(%rcx), %rdx #, tmp202
andq $-8, %rdx #, tmp202
subq %rdx, %rcx # tmp202, tmp174
leal (%rbx,%rcx), %eax #, len
subq %rcx, %r12 # tmp174, buffer
andl $-8, %eax #, len
cmpl $8, %eax #, len
jb .L36 #,
andl $-8, %eax #, tmp204
xorl %ecx, %ecx # tmp203
.L39:
movl %ecx, %edi # tmp203, tmp205
addl $8, %ecx #, tmp203
movq (%r12,%rdi), %r8 #, tmp206
cmpl %eax, %ecx # tmp204, tmp203
movq %r8, (%rdx,%rdi) # tmp206,
jb .L39 #,
jmp .L36 #
.p2align 4,,10
.p2align 3
.L68:
# sha256.c:158: __sha256_process_block (ctx->buffer, ctx->buflen & ~63, ctx);
leaq 48(%rbp), %r15 #, _8
andl $-64, %esi #, tmp129
movq %rbp, %rdx # ctx,
movq %r15, %rdi # _8,
call __sha256_process_block #
# sha256.c:160: ctx->buflen &= 63;
movl 40(%rbp), %ecx # ctx_32(D)->buflen, ctx_32(D)->buflen
# sha256.c:162: memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63],
leaq 0(%r13,%r14), %rax #, tmp132
andq $-64, %rax #, tmp133
# sha256.c:160: ctx->buflen &= 63;
movl %ecx, %edx # ctx_32(D)->buflen, _10
# sha256.c:162: memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63],
leaq 48(%rbp,%rax), %rax #, tmp135
# sha256.c:160: ctx->buflen &= 63;
andl $63, %edx #,
# sha256.c:162: memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63],
cmpl $8, %edx #, _10
# sha256.c:160: ctx->buflen &= 63;
movl %edx, 40(%rbp) # _10, ctx_32(D)->buflen
# sha256.c:162: memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63],
jnb .L27 #,
testb $4, %cl #, ctx_32(D)->buflen
jne .L69 #,
testl %edx, %edx # _10
je .L26 #,
movzbl (%rax), %esi #, tmp148
andl $2, %ecx #, ctx_32(D)->buflen
movb %sil, 48(%rbp) # tmp148,
je .L26 #,
movzwl -2(%rax,%rdx), %eax #, tmp156
movw %ax, -2(%r15,%rdx) # tmp156,
jmp .L26 #
.p2align 4,,10
.p2align 3
.L27:
movq (%rax), %rcx #, tmp157
movq %rcx, 48(%rbp) # tmp157,
movl %edx, %ecx # _10, _10
movq -8(%rax,%rcx), %rsi #, tmp164
movq %rsi, -8(%r15,%rcx) # tmp164,
leaq 56(%rbp), %rcx #, tmp165
andq $-8, %rcx #, tmp165
subq %rcx, %r15 # tmp165, _8
addl %r15d, %edx # _8, _10
subq %r15, %rax # _8, tmp138
andl $-8, %edx #, _10
cmpl $8, %edx #, _10
jb .L26 #,
andl $-8, %edx #, tmp167
xorl %esi, %esi # tmp166
.L31:
movl %esi, %edi # tmp166, tmp168
addl $8, %esi #, tmp166
movq (%rax,%rdi), %r8 #, tmp169
cmpl %edx, %esi # tmp167, tmp166
movq %r8, (%rcx,%rdi) # tmp169,
jb .L31 #,
jmp .L26 #
.L69:
movl (%rax), %ecx #, tmp140
movl %ecx, 48(%rbp) # tmp140,
movl -4(%rax,%rdx), %eax #, tmp147
movl %eax, -4(%r15,%rdx) # tmp147,
jmp .L26 #
.p2align 4,,10
.p2align 3
.L66:
# sha256.c:202: memcpy (&ctx->buffer[left_over], buffer, len);
movl (%r12), %edx #* buffer, tmp177
movl %edx, (%rcx) # tmp177,
movl %ebx, %edx # len, len
movl -4(%r12,%rdx), %eax #, tmp184
movl %eax, -4(%rcx,%rdx) # tmp184,
jmp .L36 #
.L67:
movl %ebx, %edx # len, len
movzwl -2(%r12,%rdx), %eax #, tmp193
movw %ax, -2(%rcx,%rdx) # tmp193,
jmp .L36 #
.cfi_endproc
.LFE34:
.size __sha256_process_bytes, .-__sha256_process_bytes
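# __sha256_process_bytes above is the streaming entry point.  A hedged
# outline of its control flow, following the sha256.c line references:
#
#   if (ctx->buflen != 0)       /* top up a pending partial block (.L63) */
#     copy up to 128 - left_over bytes, process any complete 64-byte
#     blocks, and slide the remainder to the front of ctx->buffer;
#   if (len >= 64)              /* bulk path (.L64)                      */
#     __sha256_process_block (buffer, len & ~63, ctx), advance buffer,
#     len &= 63;
#   if (len > 0)                /* stash the tail (.L65)                 */
#     memcpy (&ctx->buffer[left_over], buffer, len), update ctx->buflen.
#
# The open-coded byte/word copy sequences (.L35/.L39, .L66, .L67, .L27/.L31,
# .L69) are GCC's inline expansion of the small memcpy calls.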
.section .rodata
.align 32
.type K, @object
.size K, 256
K:
.long 1116352408
.long 1899447441
.long -1245643825
.long -373957723
.long 961987163
.long 1508970993
.long -1841331548
.long -1424204075
.long -670586216
.long 310598401
.long 607225278
.long 1426881987
.long 1925078388
.long -2132889090
.long -1680079193
.long -1046744716
.long -459576895
.long -272742522
.long 264347078
.long 604807628
.long 770255983
.long 1249150122
.long 1555081692
.long 1996064986
.long -1740746414
.long -1473132947
.long -1341970488
.long -1084653625
.long -958395405
.long -710438585
.long 113926993
.long 338241895
.long 666307205
.long 773529912
.long 1294757372
.long 1396182291
.long 1695183700
.long 1986661051
.long -2117940946
.long -1838011259
.long -1564481375
.long -1474664885
.long -1035236496
.long -949202525
.long -778901479
.long -694614492
.long -200395387
.long 275423344
.long 430227734
.long 506948616
.long 659060556
.long 883997877
.long 958139571
.long 1322822218
.long 1537002063
.long 1747873779
.long 1955562222
.long 2024104815
.long -2067236844
.long -1933114872
.long -1866530822
.long -1538233109
.long -1090935817
.long -965641998
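# K above is the standard table of 64 SHA-256 round constants; the
# assembler prints them as signed decimals, e.g. 1116352408 == 0x428a2f98
# and -1245643825 == 0xb5c0fbcf.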
.align 32
.type fillbuf, @object
.size fillbuf, 64
fillbuf:
.byte -128
.byte 0
.zero 62
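# fillbuf above is the SHA-256 padding block: a single 0x80 byte (written
# as .byte -128) followed by 63 zero bytes; __sha256_finish_ctx copies a
# prefix of it after the buffered data.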
.ident "GCC: (GNU) 7.3.0"
.section .note.GNU-stack,"",@progbits