# GNU C11 (GCC) version 7.3.0 (x86_64-nyan-linux-gnu)
# compiled by GNU C version 7.3.0, GMP version 6.1.2, MPFR version 4.0.1, MPC version 1.1.0, isl version none
# GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072
# options passed: -I ../include
# -I /root/wip/nyanglibc/builds/0/nyanglibc/build/crypt
# -I /root/wip/nyanglibc/builds/0/nyanglibc/build
# -I ../sysdeps/unix/sysv/linux/x86_64/64
# -I ../sysdeps/unix/sysv/linux/x86_64
# -I ../sysdeps/unix/sysv/linux/x86/include
# -I ../sysdeps/unix/sysv/linux/x86 -I ../sysdeps/x86/nptl
# -I ../sysdeps/unix/sysv/linux/wordsize-64 -I ../sysdeps/x86_64/nptl
# -I ../sysdeps/unix/sysv/linux/include -I ../sysdeps/unix/sysv/linux
# -I ../sysdeps/nptl -I ../sysdeps/pthread -I ../sysdeps/gnu
# -I ../sysdeps/unix/inet -I ../sysdeps/unix/sysv -I ../sysdeps/unix/x86_64
# -I ../sysdeps/unix -I ../sysdeps/posix -I ../sysdeps/x86_64/64
# -I ../sysdeps/x86_64/fpu -I ../sysdeps/x86/fpu -I ../sysdeps/x86_64
# -I ../sysdeps/x86/include -I ../sysdeps/x86
# -I ../sysdeps/ieee754/float128 -I ../sysdeps/ieee754/ldbl-96/include
# -I ../sysdeps/ieee754/ldbl-96 -I ../sysdeps/ieee754/dbl-64
# -I ../sysdeps/ieee754/flt-32 -I ../sysdeps/wordsize-64
# -I ../sysdeps/ieee754 -I ../sysdeps/generic -I .. -I ../libio -I .
# -MD /run/asm/crypt/sha256.shared.v.d -MF /run/asm/crypt/sha256.os.dt -MP
# -MT /run/asm/crypt/.os -D _LIBC_REENTRANT -D MODULE_NAME=libcrypt -D PIC
# -D SHARED -D TOP_NAMESPACE=glibc
# -include /root/wip/nyanglibc/builds/0/nyanglibc/build/libc-modules.h
# -include ../include/libc-symbols.h sha256.c -mtune=generic -march=x86-64
# -auxbase-strip /run/asm/crypt/sha256.shared.v.s -O2 -Wall -Wwrite-strings
# -Wundef -Werror -Wstrict-prototypes -Wold-style-definition -std=gnu11
# -fverbose-asm -fgnu89-inline -fmerge-all-constants -frounding-math
# -fno-stack-protector -fmath-errno -fPIC -ftls-model=initial-exec
# options enabled: -fPIC -faggressive-loop-optimizations -falign-labels
# -fasynchronous-unwind-tables -fauto-inc-dec -fbranch-count-reg
# -fcaller-saves -fchkp-check-incomplete-type -fchkp-check-read
# -fchkp-check-write -fchkp-instrument-calls -fchkp-narrow-bounds
# -fchkp-optimize -fchkp-store-bounds -fchkp-use-static-bounds
# -fchkp-use-static-const-bounds -fchkp-use-wrappers -fcode-hoisting
# -fcombine-stack-adjustments -fcommon -fcompare-elim -fcprop-registers
# -fcrossjumping -fcse-follow-jumps -fdefer-pop
# -fdelete-null-pointer-checks -fdevirtualize -fdevirtualize-speculatively
# -fdwarf2-cfi-asm -fearly-inlining -feliminate-unused-debug-types
# -fexpensive-optimizations -fforward-propagate -ffp-int-builtin-inexact
# -ffunction-cse -fgcse -fgcse-lm -fgnu-runtime -fgnu-unique
# -fguess-branch-probability -fhoist-adjacent-loads -fident -fif-conversion
# -fif-conversion2 -findirect-inlining -finline -finline-atomics
# -finline-functions-called-once -finline-small-functions -fipa-bit-cp
# -fipa-cp -fipa-icf -fipa-icf-functions -fipa-icf-variables -fipa-profile
# -fipa-pure-const -fipa-ra -fipa-reference -fipa-sra -fipa-vrp
# -fira-hoist-pressure -fira-share-save-slots -fira-share-spill-slots
# -fisolate-erroneous-paths-dereference -fivopts -fkeep-static-consts
# -fleading-underscore -flifetime-dse -flra-remat -flto-odr-type-merging
# -fmath-errno -fmerge-all-constants -fmerge-debug-strings
# -fmove-loop-invariants -fomit-frame-pointer -foptimize-sibling-calls
# -foptimize-strlen -fpartial-inlining -fpeephole -fpeephole2 -fplt
# -fprefetch-loop-arrays -free -freg-struct-return -freorder-blocks
# -freorder-functions -frerun-cse-after-loop -frounding-math
# -fsched-critical-path-heuristic -fsched-dep-count-heuristic
# -fsched-group-heuristic -fsched-interblock -fsched-last-insn-heuristic
# -fsched-rank-heuristic -fsched-spec -fsched-spec-insn-heuristic
# -fsched-stalled-insns-dep -fschedule-fusion -fschedule-insns2
# -fsemantic-interposition -fshow-column -fshrink-wrap
# -fshrink-wrap-separate -fsigned-zeros -fsplit-ivs-in-unroller
# -fsplit-wide-types -fssa-backprop -fssa-phiopt -fstdarg-opt
# -fstore-merging -fstrict-aliasing -fstrict-overflow
# -fstrict-volatile-bitfields -fsync-libcalls -fthread-jumps
# -ftoplevel-reorder -ftrapping-math -ftree-bit-ccp -ftree-builtin-call-dce
# -ftree-ccp -ftree-ch -ftree-coalesce-vars -ftree-copy-prop -ftree-cselim
# -ftree-dce -ftree-dominator-opts -ftree-dse -ftree-forwprop -ftree-fre
# -ftree-loop-if-convert -ftree-loop-im -ftree-loop-ivcanon
# -ftree-loop-optimize -ftree-parallelize-loops= -ftree-phiprop -ftree-pre
# -ftree-pta -ftree-reassoc -ftree-scev-cprop -ftree-sink -ftree-slsr
# -ftree-sra -ftree-switch-conversion -ftree-tail-merge -ftree-ter
# -ftree-vrp -funit-at-a-time -funwind-tables -fverbose-asm
# -fzero-initialized-in-bss -m128bit-long-double -m64 -m80387
# -malign-stringops -mavx256-split-unaligned-load
# -mavx256-split-unaligned-store -mfancy-math-387 -mfp-ret-in-387 -mfxsr
# -mglibc -mieee-fp -mlong-double-80 -mmmx -mno-sse4 -mpush-args -mred-zone
# -msse -msse2 -mstv -mtls-direct-seg-refs -mvzeroupper
.globl __sha256_init_ctx
.type __sha256_init_ctx, @function
# sha256.c:91: ctx->H[0] = 0x6a09e667;
movabsq $-4942790177982912921, %rax #, tmp92
# sha256.c:98: ctx->H[7] = 0x5be0cd19;
movq $0, 32(%rdi) #, MEM[(void *)ctx_2(D) + 32B]
# sha256.c:100: ctx->total64 = 0;
movl $0, 40(%rdi) #, MEM[(void *)ctx_2(D) + 40B]
# sha256.c:91: ctx->H[0] = 0x6a09e667;
movq %rax, (%rdi) # tmp92, MEM[(unsigned int *)ctx_2(D)]
# sha256.c:92: ctx->H[1] = 0xbb67ae85;
movabsq $-6534734903820487822, %rax #, tmp93
movq %rax, 8(%rdi) # tmp93, MEM[(unsigned int *)ctx_2(D) + 8B]
# sha256.c:94: ctx->H[3] = 0xa54ff53a;
movabsq $-7276294671082564993, %rax #, tmp94
movq %rax, 16(%rdi) # tmp94, MEM[(unsigned int *)ctx_2(D) + 16B]
# sha256.c:96: ctx->H[5] = 0x9b05688c;
movabsq $6620516960021240235, %rax #, tmp95
movq %rax, 24(%rdi) # tmp95, MEM[(void *)ctx_2(D) + 24B]
.size __sha256_init_ctx, .-__sha256_init_ctx
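# The stores above initialize the SHA-256 state: the eight 32-bit initial hash
# values are written as four merged 64-bit constants, and the two length fields
# are cleared. A minimal C sketch of the same initialization follows; the struct
# is an assumption reconstructed from the offsets used in this listing (0..28
# for H, 32 for total64, 40 for buflen, 48 for the buffer), not the exact glibc
# declaration:
#include <stdint.h>

struct sha256_ctx_sketch
{
  uint32_t H[8];            /* offsets 0..28: the four movabsq/movq pairs above */
  uint64_t total64;         /* offset 32: cleared by the movq $0, 32(%rdi) */
  uint32_t buflen;          /* offset 40: cleared by the movl $0, 40(%rdi) */
  union                     /* offset 48 after alignment: ctx->buffer below */
  {
    unsigned char buffer[128];
    uint64_t buffer64[16];
  };
};

static void
sha256_init_sketch (struct sha256_ctx_sketch *ctx)
{
  /* SHA-256 initial hash values (FIPS 180-4); adjacent pairs are what the
     compiler merged into the 64-bit immediates in the listing.  */
  ctx->H[0] = 0x6a09e667; ctx->H[1] = 0xbb67ae85;
  ctx->H[2] = 0x3c6ef372; ctx->H[3] = 0xa54ff53a;
  ctx->H[4] = 0x510e527f; ctx->H[5] = 0x9b05688c;
  ctx->H[6] = 0x1f83d9ab; ctx->H[7] = 0x5be0cd19;
  ctx->total64 = 0;
  ctx->buflen = 0;
}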
.globl __sha256_process_block
.type __sha256_process_block, @function
__sha256_process_block:
.cfi_def_cfa_offset 16
.cfi_def_cfa_offset 24
.cfi_def_cfa_offset 32
.cfi_def_cfa_offset 40
.cfi_def_cfa_offset 48
.cfi_def_cfa_offset 56
# ./sha256-block.c:9: size_t nwords = len / sizeof (uint32_t);
movq %rsi, %rbx # len, nwords
shrq $2, %rbx #, nwords
# ./sha256-block.c:7: {
.cfi_def_cfa_offset 264
# ./sha256-block.c:22: ctx->total64 += len;
addq %rsi, 32(%rdx) # len, ctx_80(D)->D.4694.total64
# ./sha256-block.c:26: while (nwords > 0)
testq %rbx, %rbx # nwords
# ./sha256-block.c:7: {
movq %rdi, -88(%rsp) # buffer, %sfp
# ./sha256-block.c:10: uint32_t a = ctx->H[0];
movl (%rdx), %edi # ctx_80(D)->H, a
# ./sha256-block.c:7: {
movq %rdx, -64(%rsp) # ctx, %sfp
# ./sha256-block.c:9: size_t nwords = len / sizeof (uint32_t);
movq %rbx, -80(%rsp) # nwords, %sfp
# ./sha256-block.c:10: uint32_t a = ctx->H[0];
movl %edi, -108(%rsp) # a, %sfp
# ./sha256-block.c:11: uint32_t b = ctx->H[1];
movl 4(%rdx), %edi # ctx_80(D)->H, b
movl %edi, -104(%rsp) # b, %sfp
# ./sha256-block.c:12: uint32_t c = ctx->H[2];
movl 8(%rdx), %edi # ctx_80(D)->H, c
movl %edi, -100(%rsp) # c, %sfp
# ./sha256-block.c:13: uint32_t d = ctx->H[3];
movl 12(%rdx), %edi # ctx_80(D)->H, d
movl %edi, -112(%rsp) # d, %sfp
# ./sha256-block.c:14: uint32_t e = ctx->H[4];
movl 16(%rdx), %edi # ctx_80(D)->H, e
movl %edi, -96(%rsp) # e, %sfp
# ./sha256-block.c:15: uint32_t f = ctx->H[5];
movl 20(%rdx), %edi # ctx_80(D)->H, f
movl %edi, -116(%rsp) # f, %sfp
# ./sha256-block.c:16: uint32_t g = ctx->H[6];
movl 24(%rdx), %edi # ctx_80(D)->H, g
movl %edi, -92(%rsp) # g, %sfp
# ./sha256-block.c:17: uint32_t h = ctx->H[7];
movl 28(%rdx), %edi # ctx_80(D)->H, h
movl %edi, -120(%rsp) # h, %sfp
# ./sha256-block.c:26: while (nwords > 0)
leaq -56(%rsp), %r14 #, tmp241
leaq K(%rip), %r15 #, tmp242
leaq 192(%r14), %rax #, _214
movq %rax, -72(%rsp) # _214, %sfp
# ./sha256-block.c:7: {
movq -88(%rsp), %rcx # %sfp, buffer
xorl %eax, %eax # ivtmp.43
# ../bits/byteswap.h:52: return __builtin_bswap32 (__bsx);
movl (%rcx,%rax), %edx # MEM[base: words_139, index: ivtmp.43_213, offset: 0B], MEM[base: words_139, index: ivtmp.43_213, offset: 0B]
# ./sha256-block.c:53: W[t] = SWAP (*words);
movl %edx, (%r14,%rax) # _129, MEM[symbol: W, index: ivtmp.43_213, offset: 0B]
addq $4, %rax #, ivtmp.43
# ./sha256-block.c:51: for (unsigned int t = 0; t < 16; ++t)
cmpq $64, %rax #, ivtmp.43
addq $64, -88(%rsp) #, %sfp
movq -72(%rsp), %rdi # %sfp, _214
movq %r14, %rsi # tmp241, ivtmp.39
# ./sha256-block.c:57: W[t] = R1 (W[t - 2]) + W[t - 7] + R0 (W[t - 15]) + W[t - 16];
movl 56(%rsi), %eax # MEM[base: _217, offset: 56B], _5
movl 4(%rsi), %ecx # MEM[base: _217, offset: 4B], _18
addq $4, %rsi #, ivtmp.39
movl %eax, %edx # _5, tmp202
movl %eax, %r8d # _5, tmp203
shrl $10, %eax #, tmp205
roll $13, %r8d #, tmp203
roll $15, %edx #, tmp202
xorl %r8d, %edx # tmp203, tmp204
movl %ecx, %r8d # _18, tmp212
xorl %eax, %edx # tmp205, tmp206
movl -4(%rsi), %eax # MEM[base: _217, offset: 0B], MEM[base: _217, offset: 0B]
addl 32(%rsi), %eax # MEM[base: _217, offset: 36B], tmp207
roll $14, %r8d #, tmp212
addl %eax, %edx # tmp207, tmp210
movl %ecx, %eax # _18, tmp211
shrl $3, %ecx #, tmp214
rorl $7, %eax #, tmp211
xorl %r8d, %eax # tmp212, tmp213
xorl %ecx, %eax # tmp214, tmp215
addl %edx, %eax # tmp210, tmp216
movl %eax, 60(%rsi) # tmp216, MEM[base: _217, offset: 64B]
# ./sha256-block.c:56: for (unsigned int t = 16; t < 64; ++t)
cmpq %rsi, %rdi # ivtmp.39, _214
movl -120(%rsp), %eax # %sfp, h
movl -92(%rsp), %ebx # %sfp, g
xorl %r9d, %r9d # ivtmp.29
movl -116(%rsp), %ebp # %sfp, f
movl -96(%rsp), %edi # %sfp, e
movl $1116352408, %r12d #, pretmp_255
movl -112(%rsp), %r13d # %sfp, d
movl -100(%rsp), %r10d # %sfp, c
movl -104(%rsp), %r11d # %sfp, b
movl -108(%rsp), %r8d # %sfp, a
movl %eax, %esi # h, h
movl (%r15,%r9), %r12d # MEM[symbol: K, index: ivtmp.29_224, offset: 0B], pretmp_255
movl %r10d, %r13d # c, d
movl %ebx, %esi # g, h
# ./sha256-block.c:67: e = d + T1;
movl %r11d, %r10d # b, c
movl %ebp, %ebx # f, g
movl %r8d, %r11d # a, b
movl %edi, %ebp # e, f
# ./sha256-block.c:71: a = T1 + T2;
movl %eax, %r8d # a, a
# ./sha256-block.c:67: e = d + T1;
movl %ecx, %edi # e, e
# ./sha256-block.c:62: uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
movl %edi, %eax # e, tmp217
movl %edi, %edx # e, tmp218
movl %edi, %ecx # e, tmp224
rorl $11, %edx #, tmp218
rorl $6, %eax #, tmp217
andl %ebp, %ecx # f, tmp224
xorl %edx, %eax # tmp218, tmp219
movl %edi, %edx # e, tmp220
roll $7, %edx #, tmp220
xorl %eax, %edx # tmp219, tmp221
movl %edi, %eax # e, tmp222
andl %ebx, %eax # g, tmp223
xorl %ecx, %eax # tmp224, tmp225
# ./sha256-block.c:63: uint32_t T2 = S0 (a) + Maj (a, b, c);
movl %r8d, %ecx # a, tmp232
# ./sha256-block.c:62: uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
addl %edx, %eax # tmp221, tmp226
# ./sha256-block.c:63: uint32_t T2 = S0 (a) + Maj (a, b, c);
movl %r8d, %edx # a, tmp231
# ./sha256-block.c:62: uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
addl (%r14,%r9), %eax # MEM[symbol: W, index: ivtmp.29_225, offset: 0B], tmp228
# ./sha256-block.c:63: uint32_t T2 = S0 (a) + Maj (a, b, c);
rorl $13, %ecx #, tmp232
rorl $2, %edx #, tmp231
addq $4, %r9 #, ivtmp.29
xorl %ecx, %edx # tmp232, tmp233
movl %r8d, %ecx # a, tmp234
roll $10, %ecx #, tmp234
xorl %edx, %ecx # tmp233, tmp235
movl %r11d, %edx # b, tmp236
# ./sha256-block.c:62: uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
addl %esi, %eax # h, tmp230
# ./sha256-block.c:63: uint32_t T2 = S0 (a) + Maj (a, b, c);
xorl %r10d, %edx # c, tmp236
movl %r11d, %esi # b, tmp238
# ./sha256-block.c:62: uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
addl %r12d, %eax # pretmp_255, T1
# ./sha256-block.c:63: uint32_t T2 = S0 (a) + Maj (a, b, c);
andl %r8d, %edx # a, tmp237
andl %r10d, %esi # c, tmp238
xorl %esi, %edx # tmp238, tmp239
addl %ecx, %edx # tmp235, T2
# ./sha256-block.c:67: e = d + T1;
leal (%rax,%r13), %ecx #, e
# ./sha256-block.c:71: a = T1 + T2;
addl %edx, %eax # T2, a
# ./sha256-block.c:60: for (unsigned int t = 0; t < 64; ++t)
cmpq $256, %r9 #, ivtmp.29
# ./sha256-block.c:76: a += a_save;
addl %eax, -108(%rsp) # a, %sfp
# ./sha256-block.c:77: b += b_save;
addl %r8d, -104(%rsp) # a, %sfp
# ./sha256-block.c:78: c += c_save;
addl %r11d, -100(%rsp) # b, %sfp
# ./sha256-block.c:79: d += d_save;
addl %r10d, -112(%rsp) # c, %sfp
# ./sha256-block.c:80: e += e_save;
addl %ecx, -96(%rsp) # e, %sfp
# ./sha256-block.c:81: f += f_save;
addl %edi, -116(%rsp) # e, %sfp
# ./sha256-block.c:82: g += g_save;
addl %ebp, -92(%rsp) # f, %sfp
# ./sha256-block.c:83: h += h_save;
addl %ebx, -120(%rsp) # g, %sfp
# ./sha256-block.c:26: while (nwords > 0)
subq $16, -80(%rsp) #, %sfp
# ./sha256-block.c:90: ctx->H[0] = a;
movq -64(%rsp), %rax # %sfp, ctx
movl -108(%rsp), %ebx # %sfp, a
movl %ebx, (%rax) # a, ctx_80(D)->H
# ./sha256-block.c:91: ctx->H[1] = b;
movl -104(%rsp), %ebx # %sfp, b
movl %ebx, 4(%rax) # b, ctx_80(D)->H
# ./sha256-block.c:92: ctx->H[2] = c;
movl -100(%rsp), %ebx # %sfp, c
movl %ebx, 8(%rax) # c, ctx_80(D)->H
# ./sha256-block.c:93: ctx->H[3] = d;
movl -112(%rsp), %ebx # %sfp, d
movl %ebx, 12(%rax) # d, ctx_80(D)->H
# ./sha256-block.c:94: ctx->H[4] = e;
movl -96(%rsp), %ebx # %sfp, e
movl %ebx, 16(%rax) # e, ctx_80(D)->H
# ./sha256-block.c:95: ctx->H[5] = f;
movl -116(%rsp), %ebx # %sfp, f
movl %ebx, 20(%rax) # f, ctx_80(D)->H
# ./sha256-block.c:96: ctx->H[6] = g;
movl -92(%rsp), %ebx # %sfp, g
movl %ebx, 24(%rax) # g, ctx_80(D)->H
# ./sha256-block.c:97: ctx->H[7] = h;
movl -120(%rsp), %ebx # %sfp, h
movl %ebx, 28(%rax) # h, ctx_80(D)->H
# ./sha256-block.c:98: }
.cfi_def_cfa_offset 56
.cfi_def_cfa_offset 48
.cfi_def_cfa_offset 40
.cfi_def_cfa_offset 32
.cfi_def_cfa_offset 24
.cfi_def_cfa_offset 16
.cfi_def_cfa_offset 8
.size __sha256_process_block, .-__sha256_process_block
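# A minimal C sketch of the per-block loop the function above implements,
# following the ./sha256-block.c line comments quoted in the listing. K[] is
# the round-constant table addressed through leaq K(%rip), %r15, and the
# movl $1116352408, %r12d above preloads K[0] = 0x428a2f98. The function and
# macro names here are assumptions; the formulas are the standard FIPS 180-4
# definitions, and the comments note which rotates in the listing realize them:
#include <stdint.h>

#define ROR(x, n)  (((x) >> (n)) | ((x) << (32 - (n))))
#define S0(a)      (ROR (a, 2) ^ ROR (a, 13) ^ ROR (a, 22))   /* rorl $2/$13, roll $10 */
#define S1(e)      (ROR (e, 6) ^ ROR (e, 11) ^ ROR (e, 25))   /* rorl $6/$11, roll $7 */
#define R0(x)      (ROR (x, 7) ^ ROR (x, 18) ^ ((x) >> 3))    /* rorl $7, roll $14, shrl $3 */
#define R1(x)      (ROR (x, 17) ^ ROR (x, 19) ^ ((x) >> 10))  /* roll $15/$13, shrl $10 */
#define Ch(e,f,g)  (((e) & (f)) ^ (~(e) & (g)))
#define Maj(a,b,c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))

/* One 64-byte block; W[0..15] hold the byte-swapped input words, as filled in
   by the __builtin_bswap32 loop above.  */
static void
sha256_round_sketch (uint32_t H[8], uint32_t W[64], const uint32_t K[64])
{
  for (unsigned int t = 16; t < 64; ++t)
    W[t] = R1 (W[t - 2]) + W[t - 7] + R0 (W[t - 15]) + W[t - 16];

  uint32_t a = H[0], b = H[1], c = H[2], d = H[3];
  uint32_t e = H[4], f = H[5], g = H[6], h = H[7];
  for (unsigned int t = 0; t < 64; ++t)
    {
      uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
      uint32_t T2 = S0 (a) + Maj (a, b, c);   /* computed as ((b ^ c) & a) ^ (b & c) above */
      h = g; g = f; f = e; e = d + T1;
      d = c; c = b; b = a; a = T1 + T2;
    }
  H[0] += a; H[1] += b; H[2] += c; H[3] += d;
  H[4] += e; H[5] += f; H[6] += g; H[7] += h;
}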
.globl __sha256_finish_ctx
.type __sha256_finish_ctx, @function
.cfi_def_cfa_offset 16
.cfi_def_cfa_offset 24
.cfi_def_cfa_offset 32
.cfi_def_cfa_offset 40
movq %rsi, %rbp # resbuf, resbuf
movq %rdi, %rbx # ctx, ctx
.cfi_def_cfa_offset 48
# sha256.c:114: uint32_t bytes = ctx->buflen;
movl 40(%rdi), %r12d # ctx_21(D)->buflen,
# sha256.c:118: ctx->total64 += bytes;
addq %r12, 32(%rdi) # _2, ctx_21(D)->D.4694.total64
# sha256.c:120: pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
cmpl $55, %r12d #, bytes
# sha256.c:120: pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
movl $120, %r13d #, tmp114
subl %r12d, %r13d # bytes, iftmp.0_18
# sha256.c:121: memcpy (&ctx->buffer[bytes], fillbuf, pad);
leaq 48(%rbx,%r12), %rdi #, tmp118
leaq fillbuf(%rip), %rsi #,
movq %r13, %rdx # iftmp.0_18,
# sha256.c:125: ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
addq %r13, %r12 # iftmp.0_18, _9
# sha256.c:121: memcpy (&ctx->buffer[bytes], fillbuf, pad);
# sha256.c:125: ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
movq 32(%rbx), %rax # ctx_21(D)->D.4694.total64, tmp137
movq %r12, %rdx # _9, tmp127
# sha256.c:133: __sha256_process_block (ctx->buffer, bytes + pad + 8, ctx);
leaq 8(%r12), %rsi #, tmp129
# sha256.c:125: ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
shrq $3, %rdx #, tmp127
# sha256.c:133: __sha256_process_block (ctx->buffer, bytes + pad + 8, ctx);
leaq 48(%rbx), %rdi #, tmp130
# sha256.c:125: ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
salq $3, %rax #, tmp125
# ../bits/byteswap.h:73: return __builtin_bswap64 (__bsx);
# sha256.c:125: ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
movq %rax, 48(%rbx,%rdx,8) # _31, ctx_21(D)->D.4700.buffer64
# sha256.c:133: __sha256_process_block (ctx->buffer, bytes + pad + 8, ctx);
movq %rbx, %rdx # ctx,
call __sha256_process_block@PLT #
xorl %eax, %eax # ivtmp.68
# ../bits/byteswap.h:52: return __builtin_bswap32 (__bsx);
movl (%rbx,%rax), %edx # MEM[base: ctx_21(D), index: ivtmp.68_36, offset: 0B], MEM[base: ctx_21(D), index: ivtmp.68_36, offset: 0B]
# sha256.c:137: ((uint32_t *) resbuf)[i] = SWAP (ctx->H[i]);
movl %edx, 0(%rbp,%rax) # _27, MEM[base: resbuf_30(D), index: ivtmp.68_36, offset: 0B]
addq $4, %rax #, ivtmp.68
# sha256.c:136: for (unsigned int i = 0; i < 8; ++i)
cmpq $32, %rax #, ivtmp.68
.cfi_def_cfa_offset 40
movq %rbp, %rax # resbuf,
.cfi_def_cfa_offset 32
.cfi_def_cfa_offset 24
.cfi_def_cfa_offset 16
.cfi_def_cfa_offset 8
# sha256.c:120: pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
movl $56, %r13d #, tmp116
subl %r12d, %r13d # bytes, iftmp.0_18
.size __sha256_finish_ctx, .-__sha256_finish_ctx
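# A minimal C sketch of the finalization sequence above, following the sha256.c
# line comments: pad to 56 mod 64, append the big-endian bit count, hash the
# closing block (two blocks when bytes >= 56), and byte-swap the state words
# into the caller's result buffer. fillbuf is only referenced by name in the
# listing; it is assumed here to be the usual 0x80-then-zeros padding block.
# The struct and prototype are the same sketch assumptions used above:
#include <stdint.h>
#include <string.h>

struct sha256_ctx_sketch
{
  uint32_t H[8];
  uint64_t total64;
  uint32_t buflen;
  union { unsigned char buffer[128]; uint64_t buffer64[16]; };
};

extern void __sha256_process_block (const void *buffer, size_t len,
                                    struct sha256_ctx_sketch *ctx);

/* Assumed padding block: a 0x80 byte followed by zeros.  */
static const unsigned char fillbuf[64] = { 0x80 };

static void *
sha256_finish_sketch (struct sha256_ctx_sketch *ctx, void *resbuf)
{
  /* sha256.c:114-118: account for the bytes still sitting in the buffer.  */
  uint32_t bytes = ctx->buflen;
  ctx->total64 += bytes;

  /* sha256.c:120-121: pad so that 8 bytes of length still fit in the block.  */
  uint32_t pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
  memcpy (&ctx->buffer[bytes], fillbuf, pad);

  /* sha256.c:125: store the message length in bits, big-endian (the elided
     bswap before the movq %rax, 48(%rbx,%rdx,8) store).  */
  ctx->buffer64[(bytes + pad) / 8] = __builtin_bswap64 (ctx->total64 << 3);

  /* sha256.c:133: hash the padded tail.  */
  __sha256_process_block (ctx->buffer, bytes + pad + 8, ctx);

  /* sha256.c:136-137: emit the digest words big-endian; the function returns
     resbuf, matching the movq %rbp, %rax before the return above.  */
  for (unsigned int i = 0; i < 8; ++i)
    ((uint32_t *) resbuf)[i] = __builtin_bswap32 (ctx->H[i]);
  return resbuf;
}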
.globl __sha256_process_bytes
.type __sha256_process_bytes, @function
__sha256_process_bytes:
.cfi_def_cfa_offset 16
.cfi_def_cfa_offset 24
.cfi_def_cfa_offset 32
.cfi_def_cfa_offset 40
movq %rdi, %r12 # buffer, buffer
.cfi_def_cfa_offset 48
.cfi_def_cfa_offset 56
movq %rdx, %rbp # ctx, ctx
movq %rsi, %rbx # len, len
.cfi_def_cfa_offset 64
# sha256.c:148: if (ctx->buflen != 0)
movl 40(%rdx), %eax # ctx_32(D)->buflen, _1
testl %eax, %eax # _1
# sha256.c:171: if (len >= 64)
cmpq $63, %rbx #, len
# sha256.c:198: if (len > 0)
testq %rbx, %rbx # len
.cfi_def_cfa_offset 56
.cfi_def_cfa_offset 48
.cfi_def_cfa_offset 40
.cfi_def_cfa_offset 32
.cfi_def_cfa_offset 24
.cfi_def_cfa_offset 16
.cfi_def_cfa_offset 8
# sha256.c:200: size_t left_over = ctx->buflen;
movl 40(%rbp), %esi # ctx_32(D)->buflen, left_over
# sha256.c:202: memcpy (&ctx->buffer[left_over], buffer, len);
movl %ebx, %eax # len, len
leaq 48(%rbp,%rsi), %rcx #, tmp172
testl %ebx, %ebx # len
movzbl (%r12), %edx #* buffer, tmp185
movb %dl, (%rcx) # tmp185,
# sha256.c:203: left_over += len;
addq %rsi, %rbx # left_over, left_over
# sha256.c:204: if (left_over >= 64)
cmpq $63, %rbx #, left_over
# sha256.c:206: __sha256_process_block (ctx->buffer, 64, ctx);
leaq 48(%rbp), %r12 #, _18
movq %rbp, %rdx # ctx,
# sha256.c:207: left_over -= 64;
subq $64, %rbx #, left_over
# sha256.c:206: __sha256_process_block (ctx->buffer, 64, ctx);
movq %r12, %rdi # _18,
call __sha256_process_block@PLT #
# sha256.c:208: memcpy (ctx->buffer, &ctx->buffer[64], left_over);
leaq 112(%rbp), %rsi #, tmp208
movq %rbx, %rdx # left_over,
movq %r12, %rdi # _18,
# sha256.c:210: ctx->buflen = left_over;
movl %ebx, 40(%rbp) # left_over, ctx_32(D)->buflen
.cfi_def_cfa_offset 56
.cfi_def_cfa_offset 48
.cfi_def_cfa_offset 40
.cfi_def_cfa_offset 32
.cfi_def_cfa_offset 24
.cfi_def_cfa_offset 16
.cfi_def_cfa_offset 8
# sha256.c:191: __sha256_process_block (buffer, len & ~63, ctx);
movq %rbx, %r13 # len, _15
movq %r12, %rdi # buffer,
movq %rbp, %rdx # ctx,
andq $-64, %r13 #, _15
# sha256.c:193: len &= 63;
andl $63, %ebx #, len
# sha256.c:191: __sha256_process_block (buffer, len & ~63, ctx);
movq %r13, %rsi # _15,
# sha256.c:192: buffer = (const char *) buffer + (len & ~63);
addq %r13, %r12 # _15, buffer
# sha256.c:191: __sha256_process_block (buffer, len & ~63, ctx);
call __sha256_process_block@PLT #
# sha256.c:150: size_t left_over = ctx->buflen;
movl %eax, %r13d # _1, left_over
# sha256.c:151: size_t add = 128 - left_over > len ? len : 128 - left_over;
movl $128, %edx #, tmp119
subq %r13, %rdx # left_over, tmp118
# sha256.c:153: memcpy (&ctx->buffer[left_over], buffer, add);
leaq 48(%rbp,%r13), %rdi #, tmp121
# sha256.c:151: size_t add = 128 - left_over > len ? len : 128 - left_over;
cmpq %rsi, %rdx # len, tmp118
cmova %rsi, %rdx # tmp118,, len, tmp118
# sha256.c:153: memcpy (&ctx->buffer[left_over], buffer, add);
movq %r12, %rsi # buffer,
# sha256.c:151: size_t add = 128 - left_over > len ? len : 128 - left_over;
movq %rdx, %r14 # tmp118, add
# sha256.c:153: memcpy (&ctx->buffer[left_over], buffer, add);
# sha256.c:154: ctx->buflen += add;
movl 40(%rbp), %esi # ctx_32(D)->buflen, _6
addl %r14d, %esi # add, _6
# sha256.c:156: if (ctx->buflen > 64)
# sha256.c:154: ctx->buflen += add;
movl %esi, 40(%rbp) # _6, ctx_32(D)->buflen
# sha256.c:156: if (ctx->buflen > 64)
# sha256.c:166: buffer = (const char *) buffer + add;
addq %r14, %r12 # add, buffer
# sha256.c:167: len -= add;
subq %r14, %rbx # add, len
# sha256.c:202: memcpy (&ctx->buffer[left_over], buffer, len);
movq (%r12), %rax #* buffer, tmp194
movq %rax, (%rcx) # tmp194,
movl %ebx, %eax # len, len
movq -8(%r12,%rax), %rdx #, tmp201
movq %rdx, -8(%rcx,%rax) # tmp201,
leaq 8(%rcx), %rdx #, tmp202
andq $-8, %rdx #, tmp202
subq %rdx, %rcx # tmp202, tmp174
leal (%rbx,%rcx), %eax #, len
subq %rcx, %r12 # tmp174, buffer
andl $-8, %eax #, len
andl $-8, %eax #, tmp204
xorl %ecx, %ecx # tmp203
movl %ecx, %edi # tmp203, tmp205
addl $8, %ecx #, tmp203
movq (%r12,%rdi), %r8 #, tmp206
cmpl %eax, %ecx # tmp204, tmp203
movq %r8, (%rdx,%rdi) # tmp206,
# sha256.c:158: __sha256_process_block (ctx->buffer, ctx->buflen & ~63, ctx);
leaq 48(%rbp), %r15 #, _8
andl $-64, %esi #, tmp129
movq %rbp, %rdx # ctx,
movq %r15, %rdi # _8,
call __sha256_process_block@PLT #
# sha256.c:160: ctx->buflen &= 63;
movl 40(%rbp), %ecx # ctx_32(D)->buflen, ctx_32(D)->buflen
# sha256.c:162: memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63],
leaq 0(%r13,%r14), %rax #, tmp132
andq $-64, %rax #, tmp133
# sha256.c:160: ctx->buflen &= 63;
movl %ecx, %edx # ctx_32(D)->buflen, _10
# sha256.c:162: memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63],
leaq 48(%rbp,%rax), %rax #, tmp135
# sha256.c:160: ctx->buflen &= 63;
# sha256.c:162: memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63],
# sha256.c:160: ctx->buflen &= 63;
movl %edx, 40(%rbp) # _10, ctx_32(D)->buflen
# sha256.c:162: memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63],
testb $4, %cl #, ctx_32(D)->buflen
testl %edx, %edx # _10
movzbl (%rax), %esi #, tmp148
andl $2, %ecx #, ctx_32(D)->buflen
movb %sil, 48(%rbp) # tmp148,
movzwl -2(%rax,%rdx), %eax #, tmp156
movw %ax, -2(%r15,%rdx) # tmp156,
movq (%rax), %rcx #, tmp157
movq %rcx, 48(%rbp) # tmp157,
movl %edx, %ecx # _10, _10
movq -8(%rax,%rcx), %rsi #, tmp164
movq %rsi, -8(%r15,%rcx) # tmp164,
leaq 56(%rbp), %rcx #, tmp165
andq $-8, %rcx #, tmp165
subq %rcx, %r15 # tmp165, _8
addl %r15d, %edx # _8, _10
subq %r15, %rax # _8, tmp138
andl $-8, %edx #, _10
andl $-8, %edx #, tmp167
xorl %esi, %esi # tmp166
movl %esi, %edi # tmp166, tmp168
addl $8, %esi #, tmp166
movq (%rax,%rdi), %r8 #, tmp169
cmpl %edx, %esi # tmp167, tmp166
movq %r8, (%rcx,%rdi) # tmp169,
movl (%rax), %ecx #, tmp140
movl %ecx, 48(%rbp) # tmp140,
movl -4(%rax,%rdx), %eax #, tmp147
movl %eax, -4(%r15,%rdx) # tmp147,
# sha256.c:202: memcpy (&ctx->buffer[left_over], buffer, len);
movl (%r12), %edx #* buffer, tmp177
movl %edx, (%rcx) # tmp177,
movl %ebx, %edx # len, len
movl -4(%r12,%rdx), %eax #, tmp184
movl %eax, -4(%rcx,%rdx) # tmp184,
movl %ebx, %edx # len, len
movzwl -2(%r12,%rdx), %eax #, tmp193
movw %ax, -2(%rcx,%rdx) # tmp193,
.size __sha256_process_bytes, .-__sha256_process_bytes
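# A minimal C sketch of the buffering logic above, assembled from the sha256.c
# line comments quoted in the listing (lines 148-210). The byte/word/qword copy
# chains near the end of the function are GCC's inline expansions of the small
# memcpy calls. The context layout and the __sha256_process_block prototype are
# the same sketch assumptions as above; the exact glibc source text may differ:
#include <stdint.h>
#include <string.h>

struct sha256_ctx_sketch
{
  uint32_t H[8];
  uint64_t total64;
  uint32_t buflen;
  union { unsigned char buffer[128]; uint64_t buffer64[16]; };
};

extern void __sha256_process_block (const void *buffer, size_t len,
                                    struct sha256_ctx_sketch *ctx);

static void
sha256_process_bytes_sketch (const void *buffer, size_t len,
                             struct sha256_ctx_sketch *ctx)
{
  /* sha256.c:148-167: top up a partially filled internal buffer first.  */
  if (ctx->buflen != 0)
    {
      size_t left_over = ctx->buflen;
      size_t add = 128 - left_over > len ? len : 128 - left_over;

      memcpy (&ctx->buffer[left_over], buffer, add);
      ctx->buflen += add;

      if (ctx->buflen > 64)
        {
          __sha256_process_block (ctx->buffer, ctx->buflen & ~63, ctx);
          ctx->buflen &= 63;
          memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63],
                  ctx->buflen);
        }

      buffer = (const char *) buffer + add;
      len -= add;
    }

  /* sha256.c:171-193: hash whole 64-byte blocks straight from the caller.  */
  if (len >= 64)
    {
      __sha256_process_block (buffer, len & ~63, ctx);
      buffer = (const char *) buffer + (len & ~63);
      len &= 63;
    }

  /* sha256.c:198-210: stash the tail for the next call.  */
  if (len > 0)
    {
      size_t left_over = ctx->buflen;

      memcpy (&ctx->buffer[left_over], buffer, len);
      left_over += len;
      if (left_over >= 64)
        {
          __sha256_process_block (ctx->buffer, 64, ctx);
          left_over -= 64;
          memcpy (ctx->buffer, &ctx->buffer[64], left_over);
        }
      ctx->buflen = left_over;
    }
}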
.type fillbuf, @object
.ident "GCC: (GNU) 7.3.0"
.section .note.GNU-stack,"",@progbits