# GNU C11 (GCC) version 7.3.0 (x86_64-nyan-linux-gnu)
# compiled by GNU C version 7.3.0, GMP version 6.1.2, MPFR version 4.0.1, MPC version 1.1.0, isl version none
# GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072
# options passed: -I ../include
# -I /root/wip/nyanglibc/builds/0/nyanglibc/build/crypt
# -I /root/wip/nyanglibc/builds/0/nyanglibc/build
# -I ../sysdeps/unix/sysv/linux/x86_64/64
# -I ../sysdeps/unix/sysv/linux/x86_64
# -I ../sysdeps/unix/sysv/linux/x86/include
# -I ../sysdeps/unix/sysv/linux/x86 -I ../sysdeps/x86/nptl
# -I ../sysdeps/unix/sysv/linux/wordsize-64 -I ../sysdeps/x86_64/nptl
# -I ../sysdeps/unix/sysv/linux/include -I ../sysdeps/unix/sysv/linux
# -I ../sysdeps/nptl -I ../sysdeps/pthread -I ../sysdeps/gnu
# -I ../sysdeps/unix/inet -I ../sysdeps/unix/sysv -I ../sysdeps/unix/x86_64
# -I ../sysdeps/unix -I ../sysdeps/posix -I ../sysdeps/x86_64/64
# -I ../sysdeps/x86_64/fpu -I ../sysdeps/x86/fpu -I ../sysdeps/x86_64
# -I ../sysdeps/x86/include -I ../sysdeps/x86
# -I ../sysdeps/ieee754/float128 -I ../sysdeps/ieee754/ldbl-96/include
# -I ../sysdeps/ieee754/ldbl-96 -I ../sysdeps/ieee754/dbl-64
# -I ../sysdeps/ieee754/flt-32 -I ../sysdeps/wordsize-64
# -I ../sysdeps/ieee754 -I ../sysdeps/generic -I .. -I ../libio -I .
# -MD /run/asm/crypt/sha256-crypt.v.d -MF /run/asm/crypt/sha256-crypt.o.dt
# -MP -MT /run/asm/crypt/.o -D _LIBC_REENTRANT -D MODULE_NAME=libcrypt
# -D PIC -D TOP_NAMESPACE=glibc
# -include /root/wip/nyanglibc/builds/0/nyanglibc/build/libc-modules.h
# -include ../include/libc-symbols.h sha256-crypt.c -mtune=generic
# -march=x86-64 -auxbase-strip /run/asm/crypt/sha256-crypt.v.s -O2 -Wall
# -Wwrite-strings -Wundef -Werror -Wstrict-prototypes
# -Wold-style-definition -std=gnu11 -fverbose-asm -fgnu89-inline
# -fmerge-all-constants -frounding-math -fno-stack-protector -fmath-errno
# -fpie -ftls-model=initial-exec
# options enabled: -faggressive-loop-optimizations -falign-labels
# -fasynchronous-unwind-tables -fauto-inc-dec -fbranch-count-reg
# -fcaller-saves -fchkp-check-incomplete-type -fchkp-check-read
# -fchkp-check-write -fchkp-instrument-calls -fchkp-narrow-bounds
# -fchkp-optimize -fchkp-store-bounds -fchkp-use-static-bounds
# -fchkp-use-static-const-bounds -fchkp-use-wrappers -fcode-hoisting
# -fcombine-stack-adjustments -fcommon -fcompare-elim -fcprop-registers
# -fcrossjumping -fcse-follow-jumps -fdefer-pop
# -fdelete-null-pointer-checks -fdevirtualize -fdevirtualize-speculatively
# -fdwarf2-cfi-asm -fearly-inlining -feliminate-unused-debug-types
# -fexpensive-optimizations -fforward-propagate -ffp-int-builtin-inexact
# -ffunction-cse -fgcse -fgcse-lm -fgnu-runtime -fgnu-unique
# -fguess-branch-probability -fhoist-adjacent-loads -fident -fif-conversion
# -fif-conversion2 -findirect-inlining -finline -finline-atomics
# -finline-functions-called-once -finline-small-functions -fipa-bit-cp
# -fipa-cp -fipa-icf -fipa-icf-functions -fipa-icf-variables -fipa-profile
# -fipa-pure-const -fipa-ra -fipa-reference -fipa-sra -fipa-vrp
# -fira-hoist-pressure -fira-share-save-slots -fira-share-spill-slots
# -fisolate-erroneous-paths-dereference -fivopts -fkeep-static-consts
# -fleading-underscore -flifetime-dse -flra-remat -flto-odr-type-merging
# -fmath-errno -fmerge-all-constants -fmerge-debug-strings
# -fmove-loop-invariants -fomit-frame-pointer -foptimize-sibling-calls
# -foptimize-strlen -fpartial-inlining -fpeephole -fpeephole2 -fpic -fpie
# -fplt -fprefetch-loop-arrays -free -freg-struct-return -freorder-blocks
# -freorder-functions -frerun-cse-after-loop -frounding-math
# -fsched-critical-path-heuristic -fsched-dep-count-heuristic
# -fsched-group-heuristic -fsched-interblock -fsched-last-insn-heuristic
# -fsched-rank-heuristic -fsched-spec -fsched-spec-insn-heuristic
# -fsched-stalled-insns-dep -fschedule-fusion -fschedule-insns2
# -fsemantic-interposition -fshow-column -fshrink-wrap
# -fshrink-wrap-separate -fsigned-zeros -fsplit-ivs-in-unroller
# -fsplit-wide-types -fssa-backprop -fssa-phiopt -fstdarg-opt
# -fstore-merging -fstrict-aliasing -fstrict-overflow
# -fstrict-volatile-bitfields -fsync-libcalls -fthread-jumps
# -ftoplevel-reorder -ftrapping-math -ftree-bit-ccp -ftree-builtin-call-dce
# -ftree-ccp -ftree-ch -ftree-coalesce-vars -ftree-copy-prop -ftree-cselim
# -ftree-dce -ftree-dominator-opts -ftree-dse -ftree-forwprop -ftree-fre
# -ftree-loop-if-convert -ftree-loop-im -ftree-loop-ivcanon
# -ftree-loop-optimize -ftree-parallelize-loops= -ftree-phiprop -ftree-pre
# -ftree-pta -ftree-reassoc -ftree-scev-cprop -ftree-sink -ftree-slsr
# -ftree-sra -ftree-switch-conversion -ftree-tail-merge -ftree-ter
# -ftree-vrp -funit-at-a-time -funwind-tables -fverbose-asm
# -fzero-initialized-in-bss -m128bit-long-double -m64 -m80387
# -malign-stringops -mavx256-split-unaligned-load
# -mavx256-split-unaligned-store -mfancy-math-387 -mfp-ret-in-387 -mfxsr
# -mglibc -mieee-fp -mlong-double-80 -mmmx -mno-sse4 -mpush-args -mred-zone
# -msse -msse2 -mstv -mtls-direct-seg-refs -mvzeroupper
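#
# Added note (not compiler output): a command of roughly this shape, under the
# flags listed above, reproduces a verbose listing like this one; the exact
# -I paths are build-tree specific and assumed here.
#   gcc -S -O2 -fverbose-asm -std=gnu11 -fpie -fmerge-all-constants \
#       -ftls-model=initial-exec sha256-crypt.c -o sha256-crypt.v.s
#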
	.section	__libc_freeres_ptrs, "aw", %nobits
	.section	.rodata.str1.1,"aMS",@progbits,1
	.globl	__sha256_crypt_r
	.type	__sha256_crypt_r, @function
	.cfi_def_cfa_offset 16
	.cfi_def_cfa_register 6
	movq	%rdi, %r12	# key, key
# sha256-crypt.c:125: if (strncmp (sha256_salt_prefix, salt, sizeof (sha256_salt_prefix) - 1) == 0)
	leaq	sha256_salt_prefix(%rip), %rdi	#,
# sha256-crypt.c:103: {
	movq	%rsi, %rbx	# salt, salt
# sha256-crypt.c:103: {
	movq	%rdx, -560(%rbp)	# buffer, %sfp
# sha256-crypt.c:125: if (strncmp (sha256_salt_prefix, salt, sizeof (sha256_salt_prefix) - 1) == 0)
# sha256-crypt.c:103: {
	movl	%ecx, -484(%rbp)	# buflen, buflen
# sha256-crypt.c:125: if (strncmp (sha256_salt_prefix, salt, sizeof (sha256_salt_prefix) - 1) == 0)
# sha256-crypt.c:127: salt += sizeof (sha256_salt_prefix) - 1;
	leaq	3(%rbx), %rdx	#, tmp628
	testl	%eax, %eax	# _1
# sha256-crypt.c:129: if (strncmp (salt, sha256_rounds_prefix, sizeof (sha256_rounds_prefix) - 1)
	leaq	sha256_rounds_prefix(%rip), %rsi	#,
# sha256-crypt.c:127: salt += sizeof (sha256_salt_prefix) - 1;
	cmovne	%rbx, %rdx	# tmp628,, salt, tmp628
	movq	%rdx, %rdi	# tmp628, salt
	movq	%rdx, -512(%rbp)	# salt, %sfp
# sha256-crypt.c:129: if (strncmp (salt, sha256_rounds_prefix, sizeof (sha256_rounds_prefix) - 1)
	testl	%eax, %eax	# _2
# sha256-crypt.c:118: bool rounds_custom = false;
	movb	$0, -485(%rbp)	#, %sfp
# sha256-crypt.c:117: size_t rounds = ROUNDS_DEFAULT;
	movq	$5000, -528(%rbp)	#, %sfp
# sha256-crypt.c:129: if (strncmp (salt, sha256_rounds_prefix, sizeof (sha256_rounds_prefix) - 1)
# sha256-crypt.c:143: salt_len = MIN (strcspn (salt, "$"), SALT_LEN_MAX);
	movq	-512(%rbp), %rdi	# %sfp,
	leaq	.LC0(%rip), %rsi	#,
# sha256-crypt.c:144: key_len = strlen (key);
	movq	%r12, %rdi	# key,
# sha256-crypt.c:143: salt_len = MIN (strcspn (salt, "$"), SALT_LEN_MAX);
	movq	%rax, %r14	#, _6
	movl	$16, %eax	#, tmp630
	cmovnb	%rax, %r14	# _6,, tmp630, _6
# sha256-crypt.c:144: key_len = strlen (key);
# sha256-crypt.c:146: if ((key - (char *) 0) % __alignof__ (uint32_t) != 0)
	testb	$3, %r12b	#, key
# sha256-crypt.c:144: key_len = strlen (key);
	movq	%rax, %r15	#, tmp286
# sha256-crypt.c:146: if ((key - (char *) 0) % __alignof__ (uint32_t) != 0)
# sha256-crypt.c:150: if (__libc_use_alloca (alloca_used + key_len + __alignof__ (uint32_t)))
	leaq	4(%rax), %rbx	#, _9
# ../sysdeps/pthread/allocalim.h:27: return (__glibc_likely (__libc_alloca_cutoff (size))
	movq	%rbx, %rdi	# _9,
	call	__libc_alloca_cutoff@PLT	#
# ../sysdeps/pthread/allocalim.h:29: || __glibc_likely (size <= PTHREAD_STACK_MIN / 4)
	testl	%eax, %eax	# _305
	cmpq	$4096, %rbx	#, _9
# sha256-crypt.c:151: tmp = alloca_account (key_len + __alignof__ (uint32_t), alloca_used);
# 151 "sha256-crypt.c" 1
	addq	$30, %rbx	#, tmp297
	andq	$-16, %rbx	#, tmp301
	subq	%rbx, %rsp	# tmp301,
	leaq	15(%rsp), %rdi	#, tmp303
	andq	$-16, %rdi	#, m__
# 151 "sha256-crypt.c" 1
	sub	%rsp, %rax	# d__
	movq	%rax, -496(%rbp)	# d__, %sfp
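#
# Added sketch (not compiler output): the alloca-vs-malloc decision quoted
# from allocalim.h and sha256-crypt.c:150-156; the $4096 compare above is the
# inlined size <= PTHREAD_STACK_MIN / 4 test.
#   if (__libc_use_alloca (alloca_used + key_len + __alignof__ (uint32_t)))
#     tmp = alloca_account (key_len + __alignof__ (uint32_t), alloca_used);
#   else
#     {
#       free_key = tmp = (char *) malloc (key_len + __alignof__ (uint32_t));
#       if (tmp == NULL)
#         return NULL;
#     }
#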
# sha256-crypt.c:120: char *free_key = NULL;
	movq	$0, -576(%rbp)	#, %sfp
# sha256-crypt.c:159: key = copied_key =
	movq	%r12, %rsi	# key,
# sha256-crypt.c:161: - (tmp - (char *) 0) % __alignof__ (uint32_t),
	addq	$4, %rdi	#, tmp307
# sha256-crypt.c:159: key = copied_key =
	movq	%r15, %rdx	# tmp286,
	movq	%rax, %r12	#, key
	movq	%rax, -584(%rbp)	# key, %sfp
# sha256-crypt.c:120: char *free_key = NULL;
	movq	$0, -576(%rbp)	#, %sfp
# sha256-crypt.c:119: size_t alloca_used = 0;
	movq	$0, -496(%rbp)	#, %sfp
# sha256-crypt.c:112: char *copied_key = NULL;
	movq	$0, -584(%rbp)	#, %sfp
# sha256-crypt.c:166: if ((salt - (char *) 0) % __alignof__ (uint32_t) != 0)
	testb	$3, -512(%rbp)	#, %sfp
# sha256-crypt.c:113: char *copied_salt = NULL;
	movq	$0, -568(%rbp)	#, %sfp
# sha256-crypt.c:166: if ((salt - (char *) 0) % __alignof__ (uint32_t) != 0)
# sha256-crypt.c:193: sha256_init_ctx (&ctx, nss_ctx);
	leaq	-400(%rbp), %rbx	#, tmp624
# sha256-crypt.c:205: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
	leaq	-224(%rbp), %r13	#, tmp625
# sha256-crypt.c:193: sha256_init_ctx (&ctx, nss_ctx);
	movq	%rbx, %rdi	# tmp624,
	call	__sha256_init_ctx@PLT	#
# sha256-crypt.c:196: sha256_process_bytes (key, key_len, &ctx, nss_ctx);
	movq	%rbx, %rdx	# tmp624,
	movq	%r15, %rsi	# tmp286,
	movq	%r12, %rdi	# key,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:200: sha256_process_bytes (salt, salt_len, &ctx, nss_ctx);
	movq	-512(%rbp), %rdi	# %sfp,
	movq	%rbx, %rdx	# tmp624,
	movq	%r14, %rsi	# _6,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:205: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
	movq	%r13, %rdi	# tmp625,
	movq	%r13, -520(%rbp)	# tmp625, %sfp
	call	__sha256_init_ctx@PLT	#
# sha256-crypt.c:208: sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
	movq	%r13, %rdx	# tmp625,
	movq	%r15, %rsi	# tmp286,
	movq	%r12, %rdi	# key,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:211: sha256_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
	movq	-512(%rbp), %rdi	# %sfp,
	movq	%r13, %rdx	# tmp625,
	movq	%r14, %rsi	# _6,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:214: sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
	movq	%r13, %rdx	# tmp625,
	movq	%r15, %rsi	# tmp286,
	movq	%r12, %rdi	# key,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:218: sha256_finish_ctx (&alt_ctx, nss_alt_ctx, alt_result);
	leaq	-464(%rbp), %rax	#, tmp626
	movq	%r13, %rdi	# tmp625,
	movq	%rax, %rsi	# tmp626,
	movq	%rax, -504(%rbp)	# tmp626, %sfp
	call	__sha256_finish_ctx@PLT	#
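#
# Added sketch (not compiler output): the digest setup this block performs,
# assembled from the quoted lines 193-218.
#   sha256_init_ctx (&ctx, nss_ctx);
#   sha256_process_bytes (key, key_len, &ctx, nss_ctx);
#   sha256_process_bytes (salt, salt_len, &ctx, nss_ctx);
#   sha256_init_ctx (&alt_ctx, nss_alt_ctx);
#   sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
#   sha256_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
#   sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
#   sha256_finish_ctx (&alt_ctx, nss_alt_ctx, alt_result);
#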
# sha256-crypt.c:221: for (cnt = key_len; cnt > 32; cnt -= 32)
	cmpq	$32, %r15	#, tmp286
	leaq	-33(%r15), %rcx	#, _15
	leaq	-32(%r15), %rsi	#, _319
	movq	%r14, -552(%rbp)	# _6, %sfp
	movq	-504(%rbp), %r14	# %sfp, tmp626
	movq	%r12, -592(%rbp)	# key, %sfp
	movq	%r15, %r12	# cnt, cnt
	movq	%rcx, -536(%rbp)	# _15, %sfp
	andq	$-32, %rcx	#, tmp370
	movq	%rsi, -544(%rbp)	# _319, %sfp
	movq	%rcx, %rax	# tmp370, tmp370
	movq	%rsi, %rcx	# _319, _331
	subq	%rax, %rcx	# tmp370, _331
	movq	%rcx, %r13	# _331, _331
# sha256-crypt.c:222: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	movq	%rbx, %rdx	# tmp624,
	movq	%r14, %rdi	# tmp626,
# sha256-crypt.c:221: for (cnt = key_len; cnt > 32; cnt -= 32)
	subq	$32, %r12	#, cnt
# sha256-crypt.c:222: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:221: for (cnt = key_len; cnt > 32; cnt -= 32)
	cmpq	%r13, %r12	# _331, cnt
# sha256-crypt.c:223: sha256_process_bytes (alt_result, cnt, &ctx, nss_ctx);
	movq	-536(%rbp), %rax	# %sfp, _15
	movq	-544(%rbp), %rsi	# %sfp, _319
	movq	%rbx, %rdx	# tmp624,
	movq	-504(%rbp), %rdi	# %sfp,
	movq	-552(%rbp), %r14	# %sfp, _6
	movq	-592(%rbp), %r12	# %sfp, key
	andq	$-32, %rax	#, _15
	subq	%rax, %rsi	# tmp376, _319
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:221: for (cnt = key_len; cnt > 32; cnt -= 32)
	movq	%r15, %r13	# tmp286, cnt
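#
# Added sketch (not compiler output): the loop from the quoted lines 221-223;
# the compiler rotated it so the `subq $32` and the exit compare straddle the
# call.
#   for (cnt = key_len; cnt > 32; cnt -= 32)
#     sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
#   sha256_process_bytes (alt_result, cnt, &ctx, nss_ctx);
#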
# sha256-crypt.c:229: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	movq	-504(%rbp), %rdi	# %sfp,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:227: for (cnt = key_len; cnt > 0; cnt >>= 1)
# sha256-crypt.c:228: if ((cnt & 1) != 0)
	testb	$1, %r13b	#, cnt
# sha256-crypt.c:229: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	movq	%rbx, %rdx	# tmp624,
# sha256-crypt.c:228: if ((cnt & 1) != 0)
# sha256-crypt.c:231: sha256_process_bytes (key, key_len, &ctx, nss_ctx);
	movq	%r15, %rsi	# tmp286,
	movq	%r12, %rdi	# key,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:227: for (cnt = key_len; cnt > 0; cnt >>= 1)
# sha256-crypt.c:234: sha256_finish_ctx (&ctx, nss_ctx, alt_result);
	movq	-504(%rbp), %rsi	# %sfp,
	movq	%rbx, %rdi	# tmp624,
	call	__sha256_finish_ctx@PLT	#
# sha256-crypt.c:237: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
	movq	-520(%rbp), %rdi	# %sfp,
	call	__sha256_init_ctx@PLT	#
	movq	%rbx, -536(%rbp)	# tmp624, %sfp
	movq	%r13, %rbx	# cnt, cnt
	movq	-520(%rbp), %r13	# %sfp, tmp625
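#
# Added sketch (not compiler output): the quoted lines 227-231 walk the bits
# of key_len; `testb $1, %r13b` above is the (cnt & 1) test.
#   for (cnt = key_len; cnt > 0; cnt >>= 1)
#     if ((cnt & 1) != 0)
#       sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
#     else
#       sha256_process_bytes (key, key_len, &ctx, nss_ctx);
#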
# sha256-crypt.c:241: sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
	movq	%r13, %rdx	# tmp625,
	movq	%r15, %rsi	# tmp286,
	movq	%r12, %rdi	# key,
# sha256-crypt.c:240: for (cnt = 0; cnt < key_len; ++cnt)
# sha256-crypt.c:241: sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:240: for (cnt = 0; cnt < key_len; ++cnt)
	cmpq	%rbx, %r15	# cnt, tmp286
	movq	-536(%rbp), %rbx	# %sfp, tmp624
# sha256-crypt.c:244: sha256_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
	leaq	-432(%rbp), %rax	#, tmp627
	movq	-520(%rbp), %rdi	# %sfp,
	movq	%rax, %rsi	# tmp627,
	movq	%rax, -544(%rbp)	# tmp627, %sfp
	call	__sha256_finish_ctx@PLT	#
# sha256-crypt.c:247: if (__libc_use_alloca (alloca_used + key_len))
	movq	-496(%rbp), %r12	# %sfp, alloca_used
	addq	%r15, %r12	# tmp286, alloca_used
# ../sysdeps/pthread/allocalim.h:27: return (__glibc_likely (__libc_alloca_cutoff (size))
	movq	%r12, %rdi	# _18,
	call	__libc_alloca_cutoff@PLT	#
# ../sysdeps/pthread/allocalim.h:29: || __glibc_likely (size <= PTHREAD_STACK_MIN / 4)
	cmpq	$4096, %r12	#, _18
	testl	%eax, %eax	# _311
# sha256-crypt.c:248: cp = p_bytes = (char *) alloca (key_len);
	leaq	30(%r15), %rax	#, tmp396
# sha256-crypt.c:121: char *free_pbytes = NULL;
	movq	$0, -592(%rbp)	#, %sfp
# sha256-crypt.c:248: cp = p_bytes = (char *) alloca (key_len);
	andq	$-16, %rax	#, tmp400
	subq	%rax, %rsp	# tmp400,
	leaq	15(%rsp), %rax	#, tmp402
	andq	$-16, %rax	#, tmp402
	movq	%rax, -496(%rbp)	# p_bytes, %sfp
	movq	%rax, -472(%rbp)	# p_bytes, cp
# sha256-crypt.c:259: for (cnt = key_len; cnt >= 32; cnt -= 32)
	cmpq	$31, %r15	#, tmp286
	leaq	-32(%r15), %rcx	#, _139
	movq	-496(%rbp), %rsi	# %sfp, p_bytes
	movq	%rcx, %rdx	# _139, tmp406
	andq	$-32, %rdx	#, tmp406
	leaq	32(%rsi), %rax	#, ivtmp.55
	leaq	64(%rsi,%rdx), %rdx	#, _26
# sha256-crypt.c:260: cp = mempcpy (cp, temp_result, 32);
	movdqa	-432(%rbp), %xmm0	# MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
	movups	%xmm0, -32(%rax)	# MEM[(char * {ref-all})&temp_result], MEM[base: _140, offset: -32B]
	movdqa	-416(%rbp), %xmm0	# MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
	movups	%xmm0, -16(%rax)	# MEM[(char * {ref-all})&temp_result], MEM[base: _140, offset: -32B]
	movq	%rax, -472(%rbp)	# ivtmp.55, cp
	addq	$32, %rax	#, ivtmp.55
# sha256-crypt.c:259: for (cnt = key_len; cnt >= 32; cnt -= 32)
	cmpq	%rax, %rdx	# ivtmp.55, _26
	movq	-496(%rbp), %rax	# %sfp, p_bytes
	andq	$-32, %rcx	#, tmp410
	movq	%r15, %rdx	# tmp286, cnt
	andl	$31, %edx	#, cnt
	leaq	32(%rax,%rcx), %rcx	#, _309
# sha256-crypt.c:261: memcpy (cp, temp_result, cnt);
	movl	%edx, %eax	# cnt, cnt
	testl	%eax, %eax	# cnt
	movq	-544(%rbp), %rsi	# %sfp, tmp627
	movzbl	(%rsi), %edx	#, tmp425
	movb	%dl, (%rcx)	# tmp425,* _309
# sha256-crypt.c:264: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
	movq	-520(%rbp), %rdi	# %sfp,
# sha256-crypt.c:267: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
	xorl	%r12d, %r12d	# cnt
# sha256-crypt.c:264: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
	call	__sha256_init_ctx@PLT	#
# sha256-crypt.c:267: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
	movq	%rbx, -536(%rbp)	# tmp624, %sfp
	movq	-512(%rbp), %r13	# %sfp, salt
	movq	%r12, %rbx	# cnt, cnt
	movq	-520(%rbp), %r12	# %sfp, tmp625
# sha256-crypt.c:268: sha256_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
	movq	%r12, %rdx	# tmp625,
	movq	%r14, %rsi	# _6,
	movq	%r13, %rdi	# salt,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:267: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
	movzbl	-464(%rbp), %edx	# alt_result, tmp451
	addq	$16, %rdx	#, tmp452
	cmpq	%rbx, %rdx	# cnt, tmp452
# sha256-crypt.c:271: sha256_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
	movq	-544(%rbp), %r13	# %sfp, tmp627
	movq	-520(%rbp), %rdi	# %sfp,
	movq	-536(%rbp), %rbx	# %sfp, tmp624
	movq	%r13, %rsi	# tmp627,
	call	__sha256_finish_ctx@PLT	#
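#
# Added sketch (not compiler output): the S-sequence seed from the quoted
# lines 264-271; the loop bound re-reads alt_result[0] (the movzbl of
# -464(%rbp)) on every pass.
#   sha256_init_ctx (&alt_ctx, nss_alt_ctx);
#   for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
#     sha256_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
#   sha256_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
#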
# sha256-crypt.c:274: cp = s_bytes = alloca (salt_len);
	leaq	30(%r14), %rax	#, tmp457
# sha256-crypt.c:277: memcpy (cp, temp_result, cnt);
	movl	%r14d, %ecx	# _6, _6
# sha256-crypt.c:274: cp = s_bytes = alloca (salt_len);
	andq	$-16, %rax	#, tmp461
	subq	%rax, %rsp	# tmp461,
	leaq	15(%rsp), %rax	#, tmp463
	andq	$-16, %rax	#, tmp463
# sha256-crypt.c:277: memcpy (cp, temp_result, cnt);
# sha256-crypt.c:274: cp = s_bytes = alloca (salt_len);
	movq	%rax, %rsi	# tmp463, tmp465
	movq	%rax, -536(%rbp)	# tmp465, %sfp
	movq	%rax, -472(%rbp)	# tmp465, cp
# sha256-crypt.c:277: memcpy (cp, temp_result, cnt);
	movq	%r13, %rax	# tmp627, tmp469
	xorl	%edx, %edx	# tmp475
# sha256-crypt.c:297: if (cnt % 7 != 0)
	movq	%r14, -552(%rbp)	# _6, %sfp
# sha256-crypt.c:281: for (cnt = 0; cnt < rounds; ++cnt)
	xorl	%r12d, %r12d	# cnt
# sha256-crypt.c:297: if (cnt % 7 != 0)
	movq	-504(%rbp), %r14	# %sfp, tmp626
# sha256-crypt.c:288: sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
	movq	-496(%rbp), %rdi	# %sfp,
	movq	%r15, %rsi	# tmp286,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:293: if (cnt % 3 != 0)
	movabsq	$-6148914691236517205, %rax	#, tmp688
	leaq	(%rdx,%rdx,2), %rax	#, tmp493
	cmpq	%rax, %r12	# tmp493, cnt
# sha256-crypt.c:297: if (cnt % 7 != 0)
	movabsq	$5270498306774157605, %rax	#, tmp689
	movq	%r12, %rax	# cnt, tmp518
	sarq	$63, %rax	#, tmp518
	subq	%rax, %rdx	# tmp518, tmp514
	leaq	0(,%rdx,8), %rax	#, tmp520
	subq	%rdx, %rax	# tmp514, tmp521
	cmpq	%rax, %r12	# tmp521, cnt
# sha256-crypt.c:301: if ((cnt & 1) != 0)
	testq	%r13, %r13	# _27
# sha256-crypt.c:302: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	movq	%rbx, %rdx	# tmp624,
# sha256-crypt.c:301: if ((cnt & 1) != 0)
# sha256-crypt.c:302: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	movq	%r14, %rdi	# tmp626,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:307: sha256_finish_ctx (&ctx, nss_ctx, alt_result);
	movq	%r14, %rsi	# tmp626,
	movq	%rbx, %rdi	# tmp624,
# sha256-crypt.c:281: for (cnt = 0; cnt < rounds; ++cnt)
# sha256-crypt.c:307: sha256_finish_ctx (&ctx, nss_ctx, alt_result);
	call	__sha256_finish_ctx@PLT	#
# sha256-crypt.c:281: for (cnt = 0; cnt < rounds; ++cnt)
	cmpq	%r12, -528(%rbp)	# cnt, %sfp
# sha256-crypt.c:284: sha256_init_ctx (&ctx, nss_ctx);
	movq	%rbx, %rdi	# tmp624,
# sha256-crypt.c:287: if ((cnt & 1) != 0)
	movq	%r12, %r13	# cnt, _27
# sha256-crypt.c:284: sha256_init_ctx (&ctx, nss_ctx);
	call	__sha256_init_ctx@PLT	#
# sha256-crypt.c:287: if ((cnt & 1) != 0)
	andl	$1, %r13d	#, _27
# sha256-crypt.c:288: sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
	movq	%rbx, %rdx	# tmp624,
# sha256-crypt.c:287: if ((cnt & 1) != 0)
# sha256-crypt.c:290: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	movq	%r14, %rdi	# tmp626,
	call	__sha256_process_bytes@PLT	#
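#
# Added sketch (not compiler output): the main cost loop, quoted lines
# 281-307. The movabsq constants are reciprocal multipliers:
# -6148914691236517205 is 0xAAAAAAAAAAAAAAAB (divide by 3) and
# 5270498306774157605 is 0x4924924924924925 (divide by 7), so cnt % 3 and
# cnt % 7 are computed without a div instruction.
#   for (cnt = 0; cnt < rounds; ++cnt)
#     {
#       sha256_init_ctx (&ctx, nss_ctx);
#       if ((cnt & 1) != 0)
#         sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
#       else
#         sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
#       if (cnt % 3 != 0)
#         sha256_process_bytes (s_bytes, salt_len, &ctx, nss_ctx);
#       if (cnt % 7 != 0)
#         sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
#       if ((cnt & 1) != 0)
#         sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
#       else
#         sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
#       sha256_finish_ctx (&ctx, nss_ctx, alt_result);
#     }
#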
# sha256-crypt.c:261: memcpy (cp, temp_result, cnt);
	movq	-432(%rbp), %rax	#, tmp434
	movq	%rax, (%rcx)	# tmp434,* _309
	movq	-544(%rbp), %rdi	# %sfp, tmp627
	movl	%edx, %eax	# cnt, cnt
	movq	-8(%rdi,%rax), %rsi	#, tmp441
	movq	%rsi, -8(%rcx,%rax)	# tmp441,
	leaq	8(%rcx), %rsi	#, tmp442
	andq	$-8, %rsi	#, tmp442
	subq	%rsi, %rcx	# tmp442, _309
	leal	(%rdx,%rcx), %eax	#, cnt
	subq	%rcx, %rdi	# _309, tmp415
	andl	$-8, %eax	#, cnt
	andl	$-8, %eax	#, tmp444
	xorl	%edx, %edx	# tmp443
	movl	%edx, %ecx	# tmp443, tmp445
	addl	$8, %edx	#, tmp443
	movq	(%rdi,%rcx), %r8	#, tmp446
	cmpl	%eax, %edx	# tmp444, tmp443
	movq	%r8, (%rsi,%rcx)	# tmp446,
# sha256-crypt.c:304: sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
	movq	-496(%rbp), %rdi	# %sfp,
	movq	%r15, %rsi	# tmp286,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:298: sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
	movq	-496(%rbp), %rdi	# %sfp,
	movq	%rbx, %rdx	# tmp624,
	movq	%r15, %rsi	# tmp286,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:294: sha256_process_bytes (s_bytes, salt_len, &ctx, nss_ctx);
	movq	-552(%rbp), %rsi	# %sfp,
	movq	-536(%rbp), %rdi	# %sfp,
	movq	%rbx, %rdx	# tmp624,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:317: cp = __stpncpy (buffer, sha256_salt_prefix, MAX (0, buflen));
	movl	-484(%rbp), %edx	# buflen,
	xorl	%r12d, %r12d	# tmp530
	movq	-560(%rbp), %rdi	# %sfp,
	leaq	sha256_salt_prefix(%rip), %rsi	#,
	movq	-552(%rbp), %r14	# %sfp, _6
	movl	%r12d, %edx	# tmp530, tmp529
	cmovns	-484(%rbp), %edx	# buflen,, tmp529
	movslq	%edx, %rdx	# tmp529, tmp531
	movq	%rax, %rdi	#, _33
	movq	%rax, -472(%rbp)	# _33, cp
# sha256-crypt.c:318: buflen -= sizeof (sha256_salt_prefix) - 1;
	movl	-484(%rbp), %eax	# buflen, tmp750
# sha256-crypt.c:320: if (rounds_custom)
	cmpb	$0, -485(%rbp)	#, %sfp
# sha256-crypt.c:318: buflen -= sizeof (sha256_salt_prefix) - 1;
	leal	-3(%rax), %edx	#, _37
	movl	%edx, -484(%rbp)	# _37, buflen
# sha256-crypt.c:320: if (rounds_custom)
# sha256-crypt.c:328: cp = __stpncpy (cp, salt, MIN ((size_t) MAX (0, buflen), salt_len));
	testl	%edx, %edx	# _37
	movq	-512(%rbp), %rsi	# %sfp,
	cmovs	%r12d, %edx	# _37,, tmp541, tmp540
	movslq	%edx, %rdx	# tmp540, tmp542
	cmpq	%r14, %rdx	# _6, tmp542
	cmova	%r14, %rdx	# tmp542,, _6, tmp539
# sha256-crypt.c:329: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
	movslq	-484(%rbp), %rdx	# buflen,
# sha256-crypt.c:328: cp = __stpncpy (cp, salt, MIN ((size_t) MAX (0, buflen), salt_len));
	movq	%rax, -472(%rbp)	# _50, cp
# sha256-crypt.c:329: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
	testl	%edx, %edx	# buflen.23_51
	cmovns	%rdx, %r12	#,,
	cmpq	%r14, %r12	# _6, tmp546
	cmova	%r14, %r12	# tmp546,, _6, tmp543
	subl	%r12d, %edx	# tmp543, _58
# sha256-crypt.c:331: if (buflen > 0)
	testl	%edx, %edx	# _58
# sha256-crypt.c:329: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
	movl	%edx, -484(%rbp)	# _58, buflen
# sha256-crypt.c:331: if (buflen > 0)
# sha256-crypt.c:333: *cp++ = '$';
	leaq	1(%rax), %rdx	#, tmp547
	movq	%rdx, -472(%rbp)	# tmp547, cp
	movb	$36, (%rax)	#, *_50
# sha256-crypt.c:334: --buflen;
	subl	$1, -484(%rbp)	#, buflen
# sha256-crypt.c:337: __b64_from_24bit (&cp, &buflen,
	movzbl	-454(%rbp), %ecx	# alt_result, alt_result
	movzbl	-464(%rbp), %edx	# alt_result, alt_result
	leaq	-484(%rbp), %r13	#, tmp551
	movzbl	-444(%rbp), %r8d	# alt_result,
	leaq	-472(%rbp), %r12	#, tmp552
	movq	%r13, %rsi	# tmp551,
	movq	%r12, %rdi	# tmp552,
	call	__b64_from_24bit@PLT	#
# sha256-crypt.c:339: __b64_from_24bit (&cp, &buflen,
	movzbl	-463(%rbp), %ecx	# alt_result, alt_result
	movzbl	-443(%rbp), %edx	# alt_result, alt_result
	movzbl	-453(%rbp), %r8d	# alt_result,
	movq	%r13, %rsi	# tmp551,
	movq	%r12, %rdi	# tmp552,
	call	__b64_from_24bit@PLT	#
# sha256-crypt.c:341: __b64_from_24bit (&cp, &buflen,
	movzbl	-442(%rbp), %ecx	# alt_result, alt_result
	movzbl	-452(%rbp), %edx	# alt_result, alt_result
	movzbl	-462(%rbp), %r8d	# alt_result,
	movq	%r13, %rsi	# tmp551,
	movq	%r12, %rdi	# tmp552,
	call	__b64_from_24bit@PLT	#
# sha256-crypt.c:343: __b64_from_24bit (&cp, &buflen,
	movzbl	-451(%rbp), %ecx	# alt_result, alt_result
	movzbl	-461(%rbp), %edx	# alt_result, alt_result
	movzbl	-441(%rbp), %r8d	# alt_result,
	movq	%r13, %rsi	# tmp551,
	movq	%r12, %rdi	# tmp552,
	call	__b64_from_24bit@PLT	#
# sha256-crypt.c:345: __b64_from_24bit (&cp, &buflen,
	movzbl	-460(%rbp), %ecx	# alt_result, alt_result
	movzbl	-440(%rbp), %edx	# alt_result, alt_result
	movzbl	-450(%rbp), %r8d	# alt_result,
	movq	%r13, %rsi	# tmp551,
	movq	%r12, %rdi	# tmp552,
	call	__b64_from_24bit@PLT	#
# sha256-crypt.c:347: __b64_from_24bit (&cp, &buflen,
	movzbl	-439(%rbp), %ecx	# alt_result, alt_result
	movzbl	-449(%rbp), %edx	# alt_result, alt_result
	movzbl	-459(%rbp), %r8d	# alt_result,
	movq	%r13, %rsi	# tmp551,
	movq	%r12, %rdi	# tmp552,
	call	__b64_from_24bit@PLT	#
# sha256-crypt.c:349: __b64_from_24bit (&cp, &buflen,
	movzbl	-448(%rbp), %ecx	# alt_result, alt_result
	movzbl	-458(%rbp), %edx	# alt_result, alt_result
	movzbl	-438(%rbp), %r8d	# alt_result,
	movq	%r13, %rsi	# tmp551,
	movq	%r12, %rdi	# tmp552,
	call	__b64_from_24bit@PLT	#
# sha256-crypt.c:351: __b64_from_24bit (&cp, &buflen,
	movzbl	-457(%rbp), %ecx	# alt_result, alt_result
	movzbl	-437(%rbp), %edx	# alt_result, alt_result
	movzbl	-447(%rbp), %r8d	# alt_result,
	movq	%r13, %rsi	# tmp551,
	movq	%r12, %rdi	# tmp552,
	call	__b64_from_24bit@PLT	#
# sha256-crypt.c:353: __b64_from_24bit (&cp, &buflen,
	movzbl	-436(%rbp), %ecx	# alt_result, alt_result
	movzbl	-446(%rbp), %edx	# alt_result, alt_result
	movzbl	-456(%rbp), %r8d	# alt_result,
	movq	%r13, %rsi	# tmp551,
	movq	%r12, %rdi	# tmp552,
	call	__b64_from_24bit@PLT	#
# sha256-crypt.c:355: __b64_from_24bit (&cp, &buflen,
	movzbl	-445(%rbp), %ecx	# alt_result, alt_result
	movzbl	-455(%rbp), %edx	# alt_result, alt_result
	movzbl	-435(%rbp), %r8d	# alt_result,
	movq	%r13, %rsi	# tmp551,
	movq	%r12, %rdi	# tmp552,
	call	__b64_from_24bit@PLT	#
# sha256-crypt.c:357: __b64_from_24bit (&cp, &buflen,
	movzbl	-433(%rbp), %ecx	# alt_result, alt_result
	movzbl	-434(%rbp), %r8d	# alt_result,
	movq	%r13, %rsi	# tmp551,
	movq	%r12, %rdi	# tmp552,
	call	__b64_from_24bit@PLT	#
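#
# Added sketch (not compiler output): the base64 emission, quoted lines
# 337-357. alt_result sits at -464(%rbp), so the offsets above map to the
# alt_result[] indices of the glibc byte permutation:
#   __b64_from_24bit (&cp, &buflen, alt_result[0],  alt_result[10], alt_result[20], 4);
#   __b64_from_24bit (&cp, &buflen, alt_result[21], alt_result[1],  alt_result[11], 4);
#   ...
#   __b64_from_24bit (&cp, &buflen, 0, alt_result[31], alt_result[30], 3);
#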
# sha256-crypt.c:359: if (buflen <= 0)
	movl	-484(%rbp), %eax	# buflen,
# sha256-crypt.c:365: *cp = '\0';	/* Terminate the string.  */
	movq	-472(%rbp), %rax	# cp, cp.31_127
	movq	-560(%rbp), %r12	# %sfp, <retval>
	movb	$0, (%rax)	#, *cp.31_127
# sha256-crypt.c:372: __sha256_init_ctx (&ctx);
	movq	%rbx, %rdi	# tmp624,
	call	__sha256_init_ctx@PLT	#
# sha256-crypt.c:373: __sha256_finish_ctx (&ctx, alt_result);
	movq	-504(%rbp), %rsi	# %sfp,
	movq	%rbx, %rdi	# tmp624,
	call	__sha256_finish_ctx@PLT	#
# sha256-crypt.c:374: explicit_bzero (&ctx, sizeof (ctx));
	movq	%rbx, %rdi	# tmp624,
	call	__explicit_bzero_chk@PLT	#
# sha256-crypt.c:375: explicit_bzero (&alt_ctx, sizeof (alt_ctx));
	movq	-520(%rbp), %rdi	# %sfp,
	call	__explicit_bzero_chk@PLT	#
# sha256-crypt.c:377: explicit_bzero (temp_result, sizeof (temp_result));
	movq	-544(%rbp), %rdi	# %sfp,
	call	__explicit_bzero_chk@PLT	#
# sha256-crypt.c:378: explicit_bzero (p_bytes, key_len);
	movq	-496(%rbp), %rdi	# %sfp,
	movq	%r15, %rsi	# tmp286,
	call	__explicit_bzero_chk@PLT	#
# sha256-crypt.c:379: explicit_bzero (s_bytes, salt_len);
	movq	-536(%rbp), %rdi	# %sfp,
	movq	%r14, %rsi	# _6,
	call	__explicit_bzero_chk@PLT	#
# sha256-crypt.c:380: if (copied_key != NULL)
	movq	-584(%rbp), %rax	# %sfp, copied_key
	testq	%rax, %rax	# copied_key
# sha256-crypt.c:381: explicit_bzero (copied_key, key_len);
	movq	%r15, %rsi	# tmp286,
	movq	%rax, %rdi	# copied_key,
	call	__explicit_bzero_chk@PLT	#
# sha256-crypt.c:382: if (copied_salt != NULL)
	movq	-568(%rbp), %rax	# %sfp, copied_salt
	testq	%rax, %rax	# copied_salt
# sha256-crypt.c:383: explicit_bzero (copied_salt, salt_len);
	movq	%r14, %rsi	# _6,
	movq	%rax, %rdi	# copied_salt,
	call	__explicit_bzero_chk@PLT	#
# sha256-crypt.c:385: free (free_key);
	movq	-576(%rbp), %rdi	# %sfp,
# sha256-crypt.c:386: free (free_pbytes);
	movq	-592(%rbp), %rdi	# %sfp,
# sha256-crypt.c:388: }
	leaq	-40(%rbp), %rsp	#,
	movq	%r12, %rax	# <retval>,
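#
# Added sketch (not compiler output): the scrubbing epilogue, quoted lines
# 365-388; explicit_bzero appears above as __explicit_bzero_chk, its
# fortified entry point.
#   *cp = '\0';
#   explicit_bzero (&ctx, sizeof (ctx));
#   explicit_bzero (&alt_ctx, sizeof (alt_ctx));
#   explicit_bzero (temp_result, sizeof (temp_result));
#   explicit_bzero (p_bytes, key_len);
#   explicit_bzero (s_bytes, salt_len);
#   if (copied_key != NULL)
#     explicit_bzero (copied_key, key_len);
#   if (copied_salt != NULL)
#     explicit_bzero (copied_salt, salt_len);
#   free (free_key);
#   free (free_pbytes);
#   return buffer;
#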
# sha256-crypt.c:277: memcpy (cp, temp_result, cnt);
	movl	(%rax), %edx	#, tmp477
	movl	%edx, (%rsi)	# tmp477,* s_bytes
	movl	$4, %edx	#, tmp475
	movzbl	(%rax,%rdx), %eax	#, tmp483
	movb	%al, (%rsi,%rdx)	# tmp483,
	movzwl	(%rax,%rdx), %edi	#, tmp480
	movw	%di, (%rsi,%rdx)	# tmp480,
	addq	$2, %rdx	#, tmp475
	movl	%r14d, %esi	# _6, tmp471
	xorl	%eax, %eax	# tmp470
	movq	%r13, %r8	# tmp627, tmp627
	andl	$-8, %esi	#, tmp471
	movl	%eax, %edx	# tmp470, tmp472
	movq	-536(%rbp), %r10	# %sfp, tmp465
	movq	(%r8,%rdx), %rdi	# MEM[(void *)&temp_result], tmp473
	cmpl	%esi, %eax	# tmp471, tmp470
	movq	%rdi, (%r10,%rdx)	# tmp473, MEM[(void *)s_bytes_233]
	movq	%r10, %rsi	# tmp465, tmp465
	addq	%rax, %rsi	# tmp474, s_bytes
	addq	-544(%rbp), %rax	# %sfp, tmp469
# sha256-crypt.c:361: __set_errno (ERANGE);
	movq	errno@gottpoff(%rip), %rax	#, tmp602
# sha256-crypt.c:362: buffer = NULL;
	xorl	%r12d, %r12d	# <retval>
# sha256-crypt.c:361: __set_errno (ERANGE);
	movl	$34, %fs:(%rax)	#, errno
# sha256-crypt.c:322: int n = __snprintf (cp, MAX (0, buflen), "%s%zu$",
	testl	%edx, %edx	# _37
	movq	-528(%rbp), %r8	# %sfp,
	leaq	sha256_rounds_prefix(%rip), %rcx	#,
	cmovs	%r12d, %edx	# _37,, tmp530, tmp533
	movslq	%edx, %rsi	# tmp533, tmp535
	leaq	.LC1(%rip), %rdx	#,
	call	__snprintf@PLT	#
# sha256-crypt.c:325: buflen -= n;
	movl	-484(%rbp), %edx	# buflen, _37
# sha256-crypt.c:324: cp += n;
	movslq	%eax, %rdi	# n, n
	addq	-472(%rbp), %rdi	# cp, _33
# sha256-crypt.c:325: buflen -= n;
	subl	%eax, %edx	# n, _37
# sha256-crypt.c:324: cp += n;
	movq	%rdi, -472(%rbp)	# _33, cp
# sha256-crypt.c:325: buflen -= n;
	movl	%edx, -484(%rbp)	# _37, buflen
# sha256-crypt.c:132: const char *num = salt + sizeof (sha256_rounds_prefix) - 1;
	movq	-512(%rbp), %rax	# %sfp, salt
# sha256-crypt.c:134: unsigned long int srounds = strtoul (num, &endp, 10);
	leaq	-224(%rbp), %rsi	#, tmp281
# sha256-crypt.c:132: const char *num = salt + sizeof (sha256_rounds_prefix) - 1;
	leaq	7(%rax), %rdi	#, num
# sha256-crypt.c:134: unsigned long int srounds = strtoul (num, &endp, 10);
# sha256-crypt.c:135: if (*endp == '$')
	movq	-224(%rbp), %rdx	# endp, endp.0_3
	cmpb	$36, (%rdx)	#, *endp.0_3
# sha256-crypt.c:138: rounds = MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX));
	cmpq	$999999999, %rax	#, srounds
# sha256-crypt.c:137: salt = endp + 1;
	leaq	1(%rdx), %rcx	#, salt
# sha256-crypt.c:138: rounds = MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX));
	movl	$999999999, %edx	#, tmp283
	cmovbe	%rax, %rdx	# srounds,, tmp283
	movl	$1000, %eax	#, tmp284
# sha256-crypt.c:139: rounds_custom = true;
	movb	$1, -485(%rbp)	#, %sfp
	cmpq	$1000, %rdx	#, rounds
# sha256-crypt.c:137: salt = endp + 1;
	movq	%rcx, -512(%rbp)	# salt, %sfp
	cmovnb	%rdx, %rax	# rounds,, tmp284
	movq	%rax, -528(%rbp)	# tmp284, %sfp
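#
# Added sketch (not compiler output): the rounds=N parsing, quoted lines
# 132-139; the immediates above give ROUNDS_MIN = 1000 and
# ROUNDS_MAX = 999999999, applied branchlessly with cmovbe/cmovnb.
#   const char *num = salt + sizeof (sha256_rounds_prefix) - 1;
#   char *endp;
#   unsigned long int srounds = strtoul (num, &endp, 10);
#   if (*endp == '$')
#     {
#       salt = endp + 1;
#       rounds = MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX));
#       rounds_custom = true;
#     }
#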
# sha256-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
	leaq	34(%r14), %rax	#, tmp316
# sha256-crypt.c:169: alloca_used += salt_len + __alignof__ (uint32_t);
	movq	-496(%rbp), %rcx	# %sfp, alloca_used
# sha256-crypt.c:170: salt = copied_salt =
	movl	%r14d, %edx	# _6,
# sha256-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
	andq	$-16, %rax	#, tmp320
	subq	%rax, %rsp	# tmp320,
# sha256-crypt.c:169: alloca_used += salt_len + __alignof__ (uint32_t);
	leaq	4(%r14,%rcx), %rcx	#, alloca_used
# sha256-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
	leaq	15(%rsp), %rax	#, tmp322
# sha256-crypt.c:169: alloca_used += salt_len + __alignof__ (uint32_t);
	movq	%rcx, -496(%rbp)	# alloca_used, %sfp
# sha256-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
	andq	$-16, %rax	#, tmp324
# sha256-crypt.c:170: salt = copied_salt =
# sha256-crypt.c:172: - (tmp - (char *) 0) % __alignof__ (uint32_t),
	leaq	4(%rax), %rcx	#, tmp325
# sha256-crypt.c:170: salt = copied_salt =
	testb	$4, %r14b	#, _6
	testl	%edx, %edx	# _6
	movq	-512(%rbp), %rbx	# %sfp, salt
	movzbl	(%rbx), %esi	#* salt, tmp338
	movb	%sil, 4(%rax)	# tmp338,
	movq	%rcx, -512(%rbp)	# salt, %sfp
	movq	%rcx, -568(%rbp)	# salt, %sfp
	movq	-512(%rbp), %rbx	# %sfp, salt
	addq	$8, %rax	#, tmp355
	movq	(%rbx), %rdx	#* salt, tmp347
	movq	%rdx, -4(%rax)	# tmp347,
	movl	%r14d, %edx	# _6, _6
	movq	-8(%rbx,%rdx), %rsi	#, tmp354
	movq	%rsi, -8(%rcx,%rdx)	# tmp354,
	movq	%rcx, %rdx	# tmp325, tmp327
	subq	%rax, %rdx	# tmp355, tmp327
	subq	%rdx, %rbx	# tmp327, salt
	addl	%r14d, %edx	# _6, _6
	andl	$-8, %edx	#, _6
	movq	%rbx, %r8	# salt, salt
	andl	$-8, %edx	#, tmp357
	xorl	%esi, %esi	# tmp356
	movl	%esi, %edi	# tmp356, tmp358
	addl	$8, %esi	#, tmp356
	movq	(%r8,%rdi), %r9	#, tmp359
	cmpl	%edx, %esi	# tmp357, tmp356
	movq	%r9, (%rax,%rdi)	# tmp359,
# sha256-crypt.c:261: memcpy (cp, temp_result, cnt);
	movq	-544(%rbp), %rsi	# %sfp, tmp627
	movl	(%rsi), %edx	#, tmp417
	movl	%edx, (%rcx)	# tmp417,* _309
	movl	%eax, %edx	# cnt, cnt
	movl	-4(%rsi,%rdx), %eax	#, tmp424
	movl	%eax, -4(%rcx,%rdx)	# tmp424,
# sha256-crypt.c:223: sha256_process_bytes (alt_result, cnt, &ctx, nss_ctx);
	movq	-504(%rbp), %rdi	# %sfp,
	movq	%rbx, %rdx	# tmp624,
	movq	%r15, %rsi	# tmp286,
	call	__sha256_process_bytes@PLT	#
# sha256-crypt.c:227: for (cnt = key_len; cnt > 0; cnt >>= 1)
	testq	%r15, %r15	# tmp286
# sha256-crypt.c:234: sha256_finish_ctx (&ctx, nss_ctx, alt_result);
	movq	-504(%rbp), %rsi	# %sfp,
	movq	%rbx, %rdi	# tmp624,
	call	__sha256_finish_ctx@PLT	#
# sha256-crypt.c:237: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
	movq	-520(%rbp), %rdi	# %sfp,
	call	__sha256_init_ctx@PLT	#
# sha256-crypt.c:259: for (cnt = key_len; cnt >= 32; cnt -= 32)
	movq	-496(%rbp), %rcx	# %sfp, _309
	movq	%r15, %rdx	# tmp286, cnt
# sha256-crypt.c:261: memcpy (cp, temp_result, cnt);
	movl	%eax, %edx	# cnt, cnt
	movq	-544(%rbp), %rax	# %sfp, tmp627
	movzwl	-2(%rax,%rdx), %eax	#, tmp433
	movw	%ax, -2(%rcx,%rdx)	# tmp433,
# sha256-crypt.c:251: free_pbytes = cp = p_bytes = (char *) malloc (key_len);
	movq	%r15, %rdi	# tmp286,
# sha256-crypt.c:252: if (free_pbytes == NULL)
	testq	%rax, %rax	# p_bytes
# sha256-crypt.c:251: free_pbytes = cp = p_bytes = (char *) malloc (key_len);
	movq	%rax, -496(%rbp)	# p_bytes, %sfp
	movq	%rax, -472(%rbp)	# p_bytes, cp
# sha256-crypt.c:252: if (free_pbytes == NULL)
	movq	%rax, -592(%rbp)	# p_bytes, %sfp
# sha256-crypt.c:170: salt = copied_salt =
	movq	-512(%rbp), %rbx	# %sfp, salt
	movl	(%rbx), %esi	#* salt, tmp330
	movl	%esi, 4(%rax)	# tmp330,
	movl	-4(%rbx,%rdx), %eax	#, tmp337
	movl	%eax, -4(%rcx,%rdx)	# tmp337,
	movq	-512(%rbp), %rax	# %sfp, salt
	movzwl	-2(%rax,%rdx), %eax	#, tmp346
	movw	%ax, -2(%rcx,%rdx)	# tmp346,
# sha256-crypt.c:154: free_key = tmp = (char *) malloc (key_len + __alignof__ (uint32_t));
	movq	%rbx, %rdi	# _9,
# sha256-crypt.c:155: if (tmp == NULL)
	testq	%rax, %rax	# free_key
# sha256-crypt.c:154: free_key = tmp = (char *) malloc (key_len + __alignof__ (uint32_t));
	movq	%rax, -576(%rbp)	# free_key, %sfp
# sha256-crypt.c:155: if (tmp == NULL)
	movq	%rax, %rdi	# free_key, m__
# sha256-crypt.c:119: size_t alloca_used = 0;
	movq	$0, -496(%rbp)	#, %sfp
# sha256-crypt.c:254: free (free_key);
	movq	-576(%rbp), %rdi	# %sfp,
# sha256-crypt.c:255: return NULL;
	xorl	%r12d, %r12d	# <retval>
# sha256-crypt.c:254: free (free_key);
# sha256-crypt.c:255: return NULL;
# sha256-crypt.c:156: return NULL;
	xorl	%r12d, %r12d	# <retval>
	.size	__sha256_crypt_r, .-__sha256_crypt_r
	.globl	__sha256_crypt
	.type	__sha256_crypt, @function
	.cfi_def_cfa_offset 16
	.cfi_def_cfa_offset 24
	movq	%rdi, %r12	# key, key
	.cfi_def_cfa_offset 32
# sha256-crypt.c:407: + strlen (salt) + 1 + 43 + 1);
	movq	%rsi, %rdi	# salt,
# sha256-crypt.c:399: {
	movq	%rsi, %rbp	# salt, salt
# sha256-crypt.c:407: + strlen (salt) + 1 + 43 + 1);
# sha256-crypt.c:409: if (buflen < needed)
	movl	buflen.5422(%rip), %ecx	# buflen, buflen.33_4
# sha256-crypt.c:407: + strlen (salt) + 1 + 43 + 1);
	leal	66(%rax), %ebx	#, needed
	movq	buffer(%rip), %rdx	# buffer, <retval>
# sha256-crypt.c:409: if (buflen < needed)
	cmpl	%ebx, %ecx	# needed, buflen.33_4
# sha256-crypt.c:411: char *new_buffer = (char *) realloc (buffer, needed);
	movq	%rdx, %rdi	# <retval>,
	movslq	%ebx, %rsi	# needed, needed
# sha256-crypt.c:412: if (new_buffer == NULL)
	testq	%rax, %rax	# <retval>
# sha256-crypt.c:411: char *new_buffer = (char *) realloc (buffer, needed);
	movq	%rax, %rdx	#, <retval>
# sha256-crypt.c:412: if (new_buffer == NULL)
# sha256-crypt.c:415: buffer = new_buffer;
	movq	%rax, buffer(%rip)	# <retval>, buffer
# sha256-crypt.c:416: buflen = needed;
	movl	%ebx, buflen.5422(%rip)	# needed, buflen
	movl	%ebx, %ecx	# needed, buflen.33_4
# sha256-crypt.c:420: }
	.cfi_def_cfa_offset 24
# sha256-crypt.c:419: return __sha256_crypt_r (key, salt, buffer, buflen);
	movq	%rbp, %rsi	# salt,
	movq	%r12, %rdi	# key,
# sha256-crypt.c:420: }
	.cfi_def_cfa_offset 16
	.cfi_def_cfa_offset 8
# sha256-crypt.c:419: return __sha256_crypt_r (key, salt, buffer, buflen);
	jmp	__sha256_crypt_r	#
# sha256-crypt.c:420: }
	.cfi_def_cfa_offset 24
	.cfi_def_cfa_offset 16
	.cfi_def_cfa_offset 8
	.size	__sha256_crypt, .-__sha256_crypt
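#
# Added sketch (not compiler output): the non-reentrant wrapper, quoted lines
# 399-420. The `needed` expression below is assumed from the glibc source the
# line numbers point at; the `leal 66(%rax)` above is its constant part
# (66 + strlen (salt)), and the tail call reuses the static buffer.
#   static char *buffer;
#   static int buflen;
#   int needed = (sizeof (sha256_salt_prefix) - 1
#                 + sizeof (sha256_rounds_prefix) + 9 + 1
#                 + strlen (salt) + 1 + 43 + 1);
#   if (buflen < needed)
#     {
#       char *new_buffer = (char *) realloc (buffer, needed);
#       if (new_buffer == NULL)
#         return NULL;
#       buffer = new_buffer;
#       buflen = needed;
#     }
#   return __sha256_crypt_r (key, salt, buffer, buflen);
#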
	.comm	buflen.5422,4,4
	.section	__libc_freeres_ptrs
	.type	buffer, @object
	.section	.rodata.str1.8,"aMS",@progbits,1
	.type	sha256_rounds_prefix, @object
	.size	sha256_rounds_prefix, 8
sha256_rounds_prefix:
	.section	.rodata.str1.1
	.type	sha256_salt_prefix, @object
	.size	sha256_salt_prefix, 4
	.ident	"GCC: (GNU) 7.3.0"
	.section	.note.GNU-stack,"",@progbits