# GNU C11 (GCC) version 7.3.0 (x86_64-nyan-linux-gnu)
# compiled by GNU C version 7.3.0, GMP version 6.1.2, MPFR version 4.0.1, MPC version 1.1.0, isl version none
# GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072
# options passed: -I ../include
# -I /root/wip/nyanglibc/builds/0/nyanglibc/build/crypt
# -I /root/wip/nyanglibc/builds/0/nyanglibc/build
# -I ../sysdeps/unix/sysv/linux/x86_64/64
# -I ../sysdeps/unix/sysv/linux/x86_64
# -I ../sysdeps/unix/sysv/linux/x86/include
# -I ../sysdeps/unix/sysv/linux/x86 -I ../sysdeps/x86/nptl
# -I ../sysdeps/unix/sysv/linux/wordsize-64 -I ../sysdeps/x86_64/nptl
# -I ../sysdeps/unix/sysv/linux/include -I ../sysdeps/unix/sysv/linux
# -I ../sysdeps/nptl -I ../sysdeps/pthread -I ../sysdeps/gnu
# -I ../sysdeps/unix/inet -I ../sysdeps/unix/sysv -I ../sysdeps/unix/x86_64
# -I ../sysdeps/unix -I ../sysdeps/posix -I ../sysdeps/x86_64/64
# -I ../sysdeps/x86_64/fpu -I ../sysdeps/x86/fpu -I ../sysdeps/x86_64
# -I ../sysdeps/x86/include -I ../sysdeps/x86
# -I ../sysdeps/ieee754/float128 -I ../sysdeps/ieee754/ldbl-96/include
# -I ../sysdeps/ieee754/ldbl-96 -I ../sysdeps/ieee754/dbl-64
# -I ../sysdeps/ieee754/flt-32 -I ../sysdeps/wordsize-64
# -I ../sysdeps/ieee754 -I ../sysdeps/generic -I .. -I ../libio -I .
# -MD /run/asm/crypt/sha256-crypt.shared.v.d
# -MF /run/asm/crypt/sha256-crypt.os.dt -MP -MT /run/asm/crypt/.os
# -D _LIBC_REENTRANT -D MODULE_NAME=libcrypt -D PIC -D SHARED
# -D TOP_NAMESPACE=glibc
# -include /root/wip/nyanglibc/builds/0/nyanglibc/build/libc-modules.h
# -include ../include/libc-symbols.h sha256-crypt.c -mtune=generic
# -march=x86-64 -auxbase-strip /run/asm/crypt/sha256-crypt.shared.v.s -O2
# -Wall -Wwrite-strings -Wundef -Werror -Wstrict-prototypes
# -Wold-style-definition -std=gnu11 -fverbose-asm -fgnu89-inline
# -fmerge-all-constants -frounding-math -fno-stack-protector -fmath-errno
# -fPIC -ftls-model=initial-exec
# options enabled: -fPIC -faggressive-loop-optimizations -falign-labels
# -fasynchronous-unwind-tables -fauto-inc-dec -fbranch-count-reg
# -fcaller-saves -fchkp-check-incomplete-type -fchkp-check-read
# -fchkp-check-write -fchkp-instrument-calls -fchkp-narrow-bounds
# -fchkp-optimize -fchkp-store-bounds -fchkp-use-static-bounds
# -fchkp-use-static-const-bounds -fchkp-use-wrappers -fcode-hoisting
# -fcombine-stack-adjustments -fcommon -fcompare-elim -fcprop-registers
# -fcrossjumping -fcse-follow-jumps -fdefer-pop
# -fdelete-null-pointer-checks -fdevirtualize -fdevirtualize-speculatively
# -fdwarf2-cfi-asm -fearly-inlining -feliminate-unused-debug-types
# -fexpensive-optimizations -fforward-propagate -ffp-int-builtin-inexact
# -ffunction-cse -fgcse -fgcse-lm -fgnu-runtime -fgnu-unique
# -fguess-branch-probability -fhoist-adjacent-loads -fident -fif-conversion
# -fif-conversion2 -findirect-inlining -finline -finline-atomics
# -finline-functions-called-once -finline-small-functions -fipa-bit-cp
# -fipa-cp -fipa-icf -fipa-icf-functions -fipa-icf-variables -fipa-profile
# -fipa-pure-const -fipa-ra -fipa-reference -fipa-sra -fipa-vrp
# -fira-hoist-pressure -fira-share-save-slots -fira-share-spill-slots
# -fisolate-erroneous-paths-dereference -fivopts -fkeep-static-consts
# -fleading-underscore -flifetime-dse -flra-remat -flto-odr-type-merging
# -fmath-errno -fmerge-all-constants -fmerge-debug-strings
# -fmove-loop-invariants -fomit-frame-pointer -foptimize-sibling-calls
# -foptimize-strlen -fpartial-inlining -fpeephole -fpeephole2 -fplt
# -fprefetch-loop-arrays -free -freg-struct-return -freorder-blocks
# -freorder-functions -frerun-cse-after-loop -frounding-math
# -fsched-critical-path-heuristic -fsched-dep-count-heuristic
# -fsched-group-heuristic -fsched-interblock -fsched-last-insn-heuristic
# -fsched-rank-heuristic -fsched-spec -fsched-spec-insn-heuristic
# -fsched-stalled-insns-dep -fschedule-fusion -fschedule-insns2
# -fsemantic-interposition -fshow-column -fshrink-wrap
# -fshrink-wrap-separate -fsigned-zeros -fsplit-ivs-in-unroller
# -fsplit-wide-types -fssa-backprop -fssa-phiopt -fstdarg-opt
# -fstore-merging -fstrict-aliasing -fstrict-overflow
# -fstrict-volatile-bitfields -fsync-libcalls -fthread-jumps
# -ftoplevel-reorder -ftrapping-math -ftree-bit-ccp -ftree-builtin-call-dce
# -ftree-ccp -ftree-ch -ftree-coalesce-vars -ftree-copy-prop -ftree-cselim
# -ftree-dce -ftree-dominator-opts -ftree-dse -ftree-forwprop -ftree-fre
# -ftree-loop-if-convert -ftree-loop-im -ftree-loop-ivcanon
# -ftree-loop-optimize -ftree-parallelize-loops= -ftree-phiprop -ftree-pre
# -ftree-pta -ftree-reassoc -ftree-scev-cprop -ftree-sink -ftree-slsr
# -ftree-sra -ftree-switch-conversion -ftree-tail-merge -ftree-ter
# -ftree-vrp -funit-at-a-time -funwind-tables -fverbose-asm
# -fzero-initialized-in-bss -m128bit-long-double -m64 -m80387
# -malign-stringops -mavx256-split-unaligned-load
# -mavx256-split-unaligned-store -mfancy-math-387 -mfp-ret-in-387 -mfxsr
# -mglibc -mieee-fp -mlong-double-80 -mmmx -mno-sse4 -mpush-args -mred-zone
# -msse -msse2 -mstv -mtls-direct-seg-refs -mvzeroupper
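# NOTE: a listing like this can be reproduced with the sketch below (an
# assumed invocation distilled from the "options passed" block above; the
# real build adds the full -I/-D set shown there):
#
#   gcc -S -fverbose-asm -O2 -fPIC -std=gnu11 sha256-crypt.c -o sha256-crypt.s
#
# -fverbose-asm is what interleaves the "# sha256-crypt.c:NNN:" source
# echoes and operand annotations with the instructions throughout this file.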
        .section __libc_freeres_ptrs, "aw", %nobits
        .section .rodata.str1.1,"aMS",@progbits,1
        .globl __sha256_crypt_r
        .type __sha256_crypt_r, @function
        .cfi_def_cfa_offset 16
        .cfi_def_cfa_register 6
        movq %rdi, %r12 # key, key
# sha256-crypt.c:125: if (strncmp (sha256_salt_prefix, salt, sizeof (sha256_salt_prefix) - 1) == 0)
        leaq sha256_salt_prefix(%rip), %rdi #,
# sha256-crypt.c:103: {
        movq %rsi, %rbx # salt, salt
# sha256-crypt.c:103: {
        movq %rdx, -560(%rbp) # buffer, %sfp
# sha256-crypt.c:125: if (strncmp (sha256_salt_prefix, salt, sizeof (sha256_salt_prefix) - 1) == 0)
# sha256-crypt.c:103: {
        movl %ecx, -484(%rbp) # buflen, buflen
# sha256-crypt.c:125: if (strncmp (sha256_salt_prefix, salt, sizeof (sha256_salt_prefix) - 1) == 0)
# sha256-crypt.c:127: salt += sizeof (sha256_salt_prefix) - 1;
        leaq 3(%rbx), %rdx #, tmp628
        testl %eax, %eax # _1
# sha256-crypt.c:129: if (strncmp (salt, sha256_rounds_prefix, sizeof (sha256_rounds_prefix) - 1)
        leaq sha256_rounds_prefix(%rip), %rsi #,
# sha256-crypt.c:127: salt += sizeof (sha256_salt_prefix) - 1;
        cmovne %rbx, %rdx # tmp628,, salt, tmp628
        movq %rdx, %rdi # tmp628, salt
        movq %rdx, -512(%rbp) # salt, %sfp
# sha256-crypt.c:129: if (strncmp (salt, sha256_rounds_prefix, sizeof (sha256_rounds_prefix) - 1)
        testl %eax, %eax # _2
# sha256-crypt.c:118: bool rounds_custom = false;
        movb $0, -485(%rbp) #, %sfp
# sha256-crypt.c:117: size_t rounds = ROUNDS_DEFAULT;
        movq $5000, -528(%rbp) #, %sfp
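# NOTE: a C sketch of the prefix handling compiled above; the "$5$" prefix
# length follows from the leaq 3(%rbx) and from ".size sha256_salt_prefix, 4"
# at the end of this file, and ROUNDS_DEFAULT = 5000 from the immediate:
#
#   static const char sha256_salt_prefix[] = "$5$";
#   size_t rounds = 5000;                        /* ROUNDS_DEFAULT */
#   bool rounds_custom = false;
#   if (strncmp (sha256_salt_prefix, salt,
#                sizeof (sha256_salt_prefix) - 1) == 0)
#     salt += sizeof (sha256_salt_prefix) - 1;   /* the cmovne above */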
# sha256-crypt.c:129: if (strncmp (salt, sha256_rounds_prefix, sizeof (sha256_rounds_prefix) - 1)
# sha256-crypt.c:143: salt_len = MIN (strcspn (salt, "$"), SALT_LEN_MAX);
        movq -512(%rbp), %rdi # %sfp,
        leaq .LC0(%rip), %rsi #,
# sha256-crypt.c:144: key_len = strlen (key);
        movq %r12, %rdi # key,
# sha256-crypt.c:143: salt_len = MIN (strcspn (salt, "$"), SALT_LEN_MAX);
        movq %rax, %r14 #, _6
        movl $16, %eax #, tmp630
        cmovnb %rax, %r14 # _6,, tmp630, _6
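# NOTE: a C sketch of the two length computations above; the $16 immediate
# and the cmovnb implement the MIN branchlessly (SALT_LEN_MAX = 16):
#
#   size_t salt_len = strcspn (salt, "$");   /* salt runs to the next '$' */
#   if (salt_len > 16)
#     salt_len = 16;
#   size_t key_len = strlen (key);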
# sha256-crypt.c:144: key_len = strlen (key);
# sha256-crypt.c:146: if ((key - (char *) 0) % __alignof__ (uint32_t) != 0)
        testb $3, %r12b #, key
# sha256-crypt.c:144: key_len = strlen (key);
        movq %rax, %r15 #, tmp286
# sha256-crypt.c:146: if ((key - (char *) 0) % __alignof__ (uint32_t) != 0)
# sha256-crypt.c:150: if (__libc_use_alloca (alloca_used + key_len + __alignof__ (uint32_t)))
        leaq 4(%rax), %rbx #, _9
# ../sysdeps/pthread/allocalim.h:27: return (__glibc_likely (__libc_alloca_cutoff (size))
        movq %rbx, %rdi # _9,
        call __libc_alloca_cutoff@PLT #
# ../sysdeps/pthread/allocalim.h:29: || __glibc_likely (size <= PTHREAD_STACK_MIN / 4)
        testl %eax, %eax # _305
        cmpq $4096, %rbx #, _9
# sha256-crypt.c:151: tmp = alloca_account (key_len + __alignof__ (uint32_t), alloca_used);
# 151 "sha256-crypt.c" 1
        addq $30, %rbx #, tmp297
        andq $-16, %rbx #, tmp301
        subq %rbx, %rsp # tmp301,
        leaq 15(%rsp), %rdi #, tmp303
        andq $-16, %rdi #, m__
# 151 "sha256-crypt.c" 1
        sub %rsp, %rax # d__
        movq %rax, -496(%rbp) # d__, %sfp
# sha256-crypt.c:120: char *free_key = NULL;
        movq $0, -576(%rbp) #, %sfp
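# NOTE: __libc_use_alloca (quoted from allocalim.h above) decides whether
# the aligned key copy lives on the stack or the heap.  A C sketch, with
# PTHREAD_STACK_MIN / 4 already folded into the $4096 immediate:
#
#   if (__libc_alloca_cutoff (alloca_used + key_len + 4)
#       || alloca_used + key_len + 4 <= 4096)
#     tmp = alloca_account (key_len + 4, alloca_used);   /* stack copy */
#   else
#     free_key = tmp = malloc (key_len + 4);   /* heap copy, freed on exit */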
# sha256-crypt.c:159: key = copied_key =
        movq %r12, %rsi # key,
# sha256-crypt.c:161: - (tmp - (char *) 0) % __alignof__ (uint32_t),
        addq $4, %rdi #, tmp307
# sha256-crypt.c:159: key = copied_key =
        movq %r15, %rdx # tmp286,
        movq %rax, %r12 #, key
        movq %rax, -584(%rbp) # key, %sfp
# sha256-crypt.c:120: char *free_key = NULL;
        movq $0, -576(%rbp) #, %sfp
# sha256-crypt.c:119: size_t alloca_used = 0;
        movq $0, -496(%rbp) #, %sfp
# sha256-crypt.c:112: char *copied_key = NULL;
        movq $0, -584(%rbp) #, %sfp
# sha256-crypt.c:166: if ((salt - (char *) 0) % __alignof__ (uint32_t) != 0)
        testb $3, -512(%rbp) #, %sfp
# sha256-crypt.c:113: char *copied_salt = NULL;
        movq $0, -568(%rbp) #, %sfp
# sha256-crypt.c:166: if ((salt - (char *) 0) % __alignof__ (uint32_t) != 0)
# sha256-crypt.c:193: sha256_init_ctx (&ctx, nss_ctx);
        leaq -400(%rbp), %rbx #, tmp624
# sha256-crypt.c:205: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
        leaq -224(%rbp), %r13 #, tmp625
# sha256-crypt.c:193: sha256_init_ctx (&ctx, nss_ctx);
        movq %rbx, %rdi # tmp624,
        call __sha256_init_ctx@PLT #
# sha256-crypt.c:196: sha256_process_bytes (key, key_len, &ctx, nss_ctx);
        movq %rbx, %rdx # tmp624,
        movq %r15, %rsi # tmp286,
        movq %r12, %rdi # key,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:200: sha256_process_bytes (salt, salt_len, &ctx, nss_ctx);
        movq -512(%rbp), %rdi # %sfp,
        movq %rbx, %rdx # tmp624,
        movq %r14, %rsi # _6,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:205: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
        movq %r13, %rdi # tmp625,
        movq %r13, -520(%rbp) # tmp625, %sfp
        call __sha256_init_ctx@PLT #
# sha256-crypt.c:208: sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
        movq %r13, %rdx # tmp625,
        movq %r15, %rsi # tmp286,
        movq %r12, %rdi # key,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:211: sha256_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
        movq -512(%rbp), %rdi # %sfp,
        movq %r13, %rdx # tmp625,
        movq %r14, %rsi # _6,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:214: sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
        movq %r13, %rdx # tmp625,
        movq %r15, %rsi # tmp286,
        movq %r12, %rdi # key,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:218: sha256_finish_ctx (&alt_ctx, nss_alt_ctx, alt_result);
        leaq -464(%rbp), %rax #, tmp626
        movq %r13, %rdi # tmp625,
        movq %rax, %rsi # tmp626,
        movq %rax, -504(%rbp) # tmp626, %sfp
        call __sha256_finish_ctx@PLT #
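# NOTE: this block is the opening of the SHA-crypt scheme: digest A absorbs
# key|salt, digest B absorbs key|salt|key.  A C sketch of what has run once
# alt_result is written (the nss_* arguments in the quoted source are
# compiled out in this configuration, hence the three-argument calls):
#
#   sha256_init_ctx (&ctx);
#   sha256_process_bytes (key, key_len, &ctx);      /* A: key ... */
#   sha256_process_bytes (salt, salt_len, &ctx);    /* ... then salt */
#   sha256_init_ctx (&alt_ctx);                     /* B */
#   sha256_process_bytes (key, key_len, &alt_ctx);
#   sha256_process_bytes (salt, salt_len, &alt_ctx);
#   sha256_process_bytes (key, key_len, &alt_ctx);
#   sha256_finish_ctx (&alt_ctx, alt_result);       /* B = SHA256(key|salt|key) */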
# sha256-crypt.c:221: for (cnt = key_len; cnt > 32; cnt -= 32)
        cmpq $32, %r15 #, tmp286
        leaq -33(%r15), %rcx #, _15
        leaq -32(%r15), %rsi #, _319
        movq %r14, -552(%rbp) # _6, %sfp
        movq -504(%rbp), %r14 # %sfp, tmp626
        movq %r12, -592(%rbp) # key, %sfp
        movq %r15, %r12 # cnt, cnt
        movq %rcx, -536(%rbp) # _15, %sfp
        andq $-32, %rcx #, tmp370
        movq %rsi, -544(%rbp) # _319, %sfp
        movq %rcx, %rax # tmp370, tmp370
        movq %rsi, %rcx # _319, _331
        subq %rax, %rcx # tmp370, _331
        movq %rcx, %r13 # _331, _331
# sha256-crypt.c:222: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
        movq %rbx, %rdx # tmp624,
        movq %r14, %rdi # tmp626,
# sha256-crypt.c:221: for (cnt = key_len; cnt > 32; cnt -= 32)
        subq $32, %r12 #, cnt
# sha256-crypt.c:222: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:221: for (cnt = key_len; cnt > 32; cnt -= 32)
        cmpq %r13, %r12 # _331, cnt
# sha256-crypt.c:223: sha256_process_bytes (alt_result, cnt, &ctx, nss_ctx);
        movq -536(%rbp), %rax # %sfp, _15
        movq -544(%rbp), %rsi # %sfp, _319
        movq %rbx, %rdx # tmp624,
        movq -504(%rbp), %rdi # %sfp,
        movq -552(%rbp), %r14 # %sfp, _6
        movq -592(%rbp), %r12 # %sfp, key
        andq $-32, %rax #, _15
        subq %rax, %rsi # tmp376, _319
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:221: for (cnt = key_len; cnt > 32; cnt -= 32)
        movq %r15, %r13 # tmp286, cnt
# sha256-crypt.c:229: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
        movq -504(%rbp), %rdi # %sfp,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:227: for (cnt = key_len; cnt > 0; cnt >>= 1)
# sha256-crypt.c:228: if ((cnt & 1) != 0)
        testb $1, %r13b #, cnt
# sha256-crypt.c:229: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
        movq %rbx, %rdx # tmp624,
# sha256-crypt.c:228: if ((cnt & 1) != 0)
# sha256-crypt.c:231: sha256_process_bytes (key, key_len, &ctx, nss_ctx);
        movq %r15, %rsi # tmp286,
        movq %r12, %rdi # key,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:227: for (cnt = key_len; cnt > 0; cnt >>= 1)
# sha256-crypt.c:234: sha256_finish_ctx (&ctx, nss_ctx, alt_result);
        movq -504(%rbp), %rsi # %sfp,
        movq %rbx, %rdi # tmp624,
        call __sha256_finish_ctx@PLT #
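# NOTE: a C sketch of the two mixing loops above, which fold digest B into
# digest A in a key_len-dependent pattern (mirrors the quoted source lines
# 221-234):
#
#   for (cnt = key_len; cnt > 32; cnt -= 32)
#     sha256_process_bytes (alt_result, 32, &ctx);
#   sha256_process_bytes (alt_result, cnt, &ctx);
#
#   for (cnt = key_len; cnt > 0; cnt >>= 1)
#     if ((cnt & 1) != 0)
#       sha256_process_bytes (alt_result, 32, &ctx);
#     else
#       sha256_process_bytes (key, key_len, &ctx);
#   sha256_finish_ctx (&ctx, alt_result);           /* digest A done */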
# sha256-crypt.c:237: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
        movq -520(%rbp), %rdi # %sfp,
        call __sha256_init_ctx@PLT #
        movq %rbx, -536(%rbp) # tmp624, %sfp
        movq %r13, %rbx # cnt, cnt
        movq -520(%rbp), %r13 # %sfp, tmp625
# sha256-crypt.c:241: sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
        movq %r13, %rdx # tmp625,
        movq %r15, %rsi # tmp286,
        movq %r12, %rdi # key,
# sha256-crypt.c:240: for (cnt = 0; cnt < key_len; ++cnt)
# sha256-crypt.c:241: sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:240: for (cnt = 0; cnt < key_len; ++cnt)
        cmpq %rbx, %r15 # cnt, tmp286
        movq -536(%rbp), %rbx # %sfp, tmp624
# sha256-crypt.c:244: sha256_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
        leaq -432(%rbp), %rax #, tmp627
        movq -520(%rbp), %rdi # %sfp,
        movq %rax, %rsi # tmp627,
        movq %rax, -544(%rbp) # tmp627, %sfp
        call __sha256_finish_ctx@PLT #
# sha256-crypt.c:247: if (__libc_use_alloca (alloca_used + key_len))
        movq -496(%rbp), %r12 # %sfp, alloca_used
        addq %r15, %r12 # tmp286, alloca_used
# ../sysdeps/pthread/allocalim.h:27: return (__glibc_likely (__libc_alloca_cutoff (size))
        movq %r12, %rdi # _18,
        call __libc_alloca_cutoff@PLT #
# ../sysdeps/pthread/allocalim.h:29: || __glibc_likely (size <= PTHREAD_STACK_MIN / 4)
        cmpq $4096, %r12 #, _18
        testl %eax, %eax # _311
# sha256-crypt.c:248: cp = p_bytes = (char *) alloca (key_len);
        leaq 30(%r15), %rax #, tmp396
# sha256-crypt.c:121: char *free_pbytes = NULL;
        movq $0, -592(%rbp) #, %sfp
# sha256-crypt.c:248: cp = p_bytes = (char *) alloca (key_len);
        andq $-16, %rax #, tmp400
        subq %rax, %rsp # tmp400,
        leaq 15(%rsp), %rax #, tmp402
        andq $-16, %rax #, tmp402
        movq %rax, -496(%rbp) # p_bytes, %sfp
        movq %rax, -472(%rbp) # p_bytes, cp
# sha256-crypt.c:259: for (cnt = key_len; cnt >= 32; cnt -= 32)
        cmpq $31, %r15 #, tmp286
        leaq -32(%r15), %rcx #, _139
        movq -496(%rbp), %rsi # %sfp, p_bytes
        movq %rcx, %rdx # _139, tmp406
        andq $-32, %rdx #, tmp406
        leaq 32(%rsi), %rax #, ivtmp.55
        leaq 64(%rsi,%rdx), %rdx #, _26
# sha256-crypt.c:260: cp = mempcpy (cp, temp_result, 32);
        movdqa -432(%rbp), %xmm0 # MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
        movups %xmm0, -32(%rax) # MEM[(char * {ref-all})&temp_result], MEM[base: _140, offset: -32B]
        movdqa -416(%rbp), %xmm0 # MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
        movups %xmm0, -16(%rax) # MEM[(char * {ref-all})&temp_result], MEM[base: _140, offset: -32B]
        movq %rax, -472(%rbp) # ivtmp.55, cp
        addq $32, %rax #, ivtmp.55
# sha256-crypt.c:259: for (cnt = key_len; cnt >= 32; cnt -= 32)
        cmpq %rax, %rdx # ivtmp.55, _26
        movq -496(%rbp), %rax # %sfp, p_bytes
        andq $-32, %rcx #, tmp410
        movq %r15, %rdx # tmp286, cnt
        andl $31, %edx #, cnt
        leaq 32(%rax,%rcx), %rcx #, _309
# sha256-crypt.c:261: memcpy (cp, temp_result, cnt);
        movl %edx, %eax # cnt, cnt
        testl %eax, %eax # cnt
        movq -544(%rbp), %rsi # %sfp, tmp627
        movzbl (%rsi), %edx #, tmp425
        movb %dl, (%rcx) # tmp425,* _309
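# NOTE: p_bytes is the P sequence: SHA256 of the key repeated key_len
# times, tiled over a key_len-byte buffer.  The movdqa/movups pairs above
# are GCC's vectorized expansion of the mempcpy loop.  C sketch:
#
#   sha256_init_ctx (&alt_ctx);
#   for (cnt = 0; cnt < key_len; ++cnt)
#     sha256_process_bytes (key, key_len, &alt_ctx);
#   sha256_finish_ctx (&alt_ctx, temp_result);
#   cp = p_bytes = alloca (key_len);        /* or malloc via free_pbytes */
#   for (cnt = key_len; cnt >= 32; cnt -= 32)
#     cp = mempcpy (cp, temp_result, 32);
#   memcpy (cp, temp_result, cnt);          /* tail, expanded inline above */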
# sha256-crypt.c:264: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
        movq -520(%rbp), %rdi # %sfp,
# sha256-crypt.c:267: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
        xorl %r12d, %r12d # cnt
# sha256-crypt.c:264: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
        call __sha256_init_ctx@PLT #
# sha256-crypt.c:267: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
        movq %rbx, -536(%rbp) # tmp624, %sfp
        movq -512(%rbp), %r13 # %sfp, salt
        movq %r12, %rbx # cnt, cnt
        movq -520(%rbp), %r12 # %sfp, tmp625
# sha256-crypt.c:268: sha256_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
        movq %r12, %rdx # tmp625,
        movq %r14, %rsi # _6,
        movq %r13, %rdi # salt,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:267: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
        movzbl -464(%rbp), %edx # alt_result, tmp451
        addq $16, %rdx #, tmp452
        cmpq %rbx, %rdx # cnt, tmp452
# sha256-crypt.c:271: sha256_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
        movq -544(%rbp), %r13 # %sfp, tmp627
        movq -520(%rbp), %rdi # %sfp,
        movq -536(%rbp), %rbx # %sfp, tmp624
        movq %r13, %rsi # tmp627,
        call __sha256_finish_ctx@PLT #
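# NOTE: the S sequence is derived from the salt; the iteration count
# 16 + alt_result[0] is visible in the movzbl/addq $16 pair above.  C sketch
# (salt_len <= 16, so the fill below reduces to one short memcpy):
#
#   sha256_init_ctx (&alt_ctx);
#   for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
#     sha256_process_bytes (salt, salt_len, &alt_ctx);
#   sha256_finish_ctx (&alt_ctx, temp_result);
#   cp = s_bytes = alloca (salt_len);
#   memcpy (cp, temp_result, salt_len);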
# sha256-crypt.c:274: cp = s_bytes = alloca (salt_len);
        leaq 30(%r14), %rax #, tmp457
# sha256-crypt.c:277: memcpy (cp, temp_result, cnt);
        movl %r14d, %ecx # _6, _6
# sha256-crypt.c:274: cp = s_bytes = alloca (salt_len);
        andq $-16, %rax #, tmp461
        subq %rax, %rsp # tmp461,
        leaq 15(%rsp), %rax #, tmp463
        andq $-16, %rax #, tmp463
# sha256-crypt.c:277: memcpy (cp, temp_result, cnt);
# sha256-crypt.c:274: cp = s_bytes = alloca (salt_len);
        movq %rax, %rsi # tmp463, tmp465
        movq %rax, -536(%rbp) # tmp465, %sfp
        movq %rax, -472(%rbp) # tmp465, cp
# sha256-crypt.c:277: memcpy (cp, temp_result, cnt);
        movq %r13, %rax # tmp627, tmp469
        xorl %edx, %edx # tmp475
# sha256-crypt.c:297: if (cnt % 7 != 0)
        movq %r14, -552(%rbp) # _6, %sfp
# sha256-crypt.c:281: for (cnt = 0; cnt < rounds; ++cnt)
        xorl %r12d, %r12d # cnt
# sha256-crypt.c:297: if (cnt % 7 != 0)
        movq -504(%rbp), %r14 # %sfp, tmp626
# sha256-crypt.c:288: sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
        movq -496(%rbp), %rdi # %sfp,
        movq %r15, %rsi # tmp286,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:293: if (cnt % 3 != 0)
        movabsq $-6148914691236517205, %rax #, tmp688
        leaq (%rdx,%rdx,2), %rax #, tmp493
        cmpq %rax, %r12 # tmp493, cnt
# sha256-crypt.c:297: if (cnt % 7 != 0)
        movabsq $5270498306774157605, %rax #, tmp689
        movq %r12, %rax # cnt, tmp518
        sarq $63, %rax #, tmp518
        subq %rax, %rdx # tmp518, tmp514
        leaq 0(,%rdx,8), %rax #, tmp520
        subq %rdx, %rax # tmp514, tmp521
        cmpq %rax, %r12 # tmp521, cnt
# sha256-crypt.c:301: if ((cnt & 1) != 0)
        testq %r13, %r13 # _27
# sha256-crypt.c:302: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
        movq %rbx, %rdx # tmp624,
# sha256-crypt.c:301: if ((cnt & 1) != 0)
# sha256-crypt.c:302: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
        movq %r14, %rdi # tmp626,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:307: sha256_finish_ctx (&ctx, nss_ctx, alt_result);
        movq %r14, %rsi # tmp626,
        movq %rbx, %rdi # tmp624,
# sha256-crypt.c:281: for (cnt = 0; cnt < rounds; ++cnt)
# sha256-crypt.c:307: sha256_finish_ctx (&ctx, nss_ctx, alt_result);
        call __sha256_finish_ctx@PLT #
# sha256-crypt.c:281: for (cnt = 0; cnt < rounds; ++cnt)
        cmpq %r12, -528(%rbp) # cnt, %sfp
# sha256-crypt.c:284: sha256_init_ctx (&ctx, nss_ctx);
        movq %rbx, %rdi # tmp624,
# sha256-crypt.c:287: if ((cnt & 1) != 0)
        movq %r12, %r13 # cnt, _27
# sha256-crypt.c:284: sha256_init_ctx (&ctx, nss_ctx);
        call __sha256_init_ctx@PLT #
# sha256-crypt.c:287: if ((cnt & 1) != 0)
        andl $1, %r13d #, _27
# sha256-crypt.c:288: sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
        movq %rbx, %rdx # tmp624,
# sha256-crypt.c:287: if ((cnt & 1) != 0)
# sha256-crypt.c:290: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
        movq %r14, %rdi # tmp626,
        call __sha256_process_bytes@PLT #
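# NOTE: the main cost loop, run `rounds` times.  The movabsq constants
# above are the usual reciprocal expansions of cnt % 3 and cnt % 7.
# C sketch (mirrors the quoted source lines 281-307):
#
#   for (cnt = 0; cnt < rounds; ++cnt)
#     {
#       sha256_init_ctx (&ctx);
#       if ((cnt & 1) != 0)
#         sha256_process_bytes (p_bytes, key_len, &ctx);
#       else
#         sha256_process_bytes (alt_result, 32, &ctx);
#       if (cnt % 3 != 0)
#         sha256_process_bytes (s_bytes, salt_len, &ctx);
#       if (cnt % 7 != 0)
#         sha256_process_bytes (p_bytes, key_len, &ctx);
#       if ((cnt & 1) != 0)
#         sha256_process_bytes (alt_result, 32, &ctx);
#       else
#         sha256_process_bytes (p_bytes, key_len, &ctx);
#       sha256_finish_ctx (&ctx, alt_result);
#     }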
# sha256-crypt.c:261: memcpy (cp, temp_result, cnt);
        movq -432(%rbp), %rax #, tmp434
        movq %rax, (%rcx) # tmp434,* _309
        movq -544(%rbp), %rdi # %sfp, tmp627
        movl %edx, %eax # cnt, cnt
        movq -8(%rdi,%rax), %rsi #, tmp441
        movq %rsi, -8(%rcx,%rax) # tmp441,
        leaq 8(%rcx), %rsi #, tmp442
        andq $-8, %rsi #, tmp442
        subq %rsi, %rcx # tmp442, _309
        leal (%rdx,%rcx), %eax #, cnt
        subq %rcx, %rdi # _309, tmp415
        andl $-8, %eax #, cnt
        andl $-8, %eax #, tmp444
        xorl %edx, %edx # tmp443
        movl %edx, %ecx # tmp443, tmp445
        addl $8, %edx #, tmp443
        movq (%rdi,%rcx), %r8 #, tmp446
        cmpl %eax, %edx # tmp444, tmp443
        movq %r8, (%rsi,%rcx) # tmp446,
# sha256-crypt.c:304: sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
        movq -496(%rbp), %rdi # %sfp,
        movq %r15, %rsi # tmp286,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:298: sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
        movq -496(%rbp), %rdi # %sfp,
        movq %rbx, %rdx # tmp624,
        movq %r15, %rsi # tmp286,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:294: sha256_process_bytes (s_bytes, salt_len, &ctx, nss_ctx);
        movq -552(%rbp), %rsi # %sfp,
        movq -536(%rbp), %rdi # %sfp,
        movq %rbx, %rdx # tmp624,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:317: cp = __stpncpy (buffer, sha256_salt_prefix, MAX (0, buflen));
        movl -484(%rbp), %edx # buflen,
        xorl %r12d, %r12d # tmp530
        movq -560(%rbp), %rdi # %sfp,
        leaq sha256_salt_prefix(%rip), %rsi #,
        movq -552(%rbp), %r14 # %sfp, _6
        movl %r12d, %edx # tmp530, tmp529
        cmovns -484(%rbp), %edx # buflen,, tmp529
        movslq %edx, %rdx # tmp529, tmp531
        movq %rax, %rdi #, _33
        movq %rax, -472(%rbp) # _33, cp
# sha256-crypt.c:318: buflen -= sizeof (sha256_salt_prefix) - 1;
        movl -484(%rbp), %eax # buflen, tmp750
# sha256-crypt.c:320: if (rounds_custom)
        cmpb $0, -485(%rbp) #, %sfp
# sha256-crypt.c:318: buflen -= sizeof (sha256_salt_prefix) - 1;
        leal -3(%rax), %edx #, _37
        movl %edx, -484(%rbp) # _37, buflen
# sha256-crypt.c:320: if (rounds_custom)
# sha256-crypt.c:328: cp = __stpncpy (cp, salt, MIN ((size_t) MAX (0, buflen), salt_len));
        testl %edx, %edx # _37
        movq -512(%rbp), %rsi # %sfp,
        cmovs %r12d, %edx # _37,, tmp541, tmp540
        movslq %edx, %rdx # tmp540, tmp542
        cmpq %r14, %rdx # _6, tmp542
        cmova %r14, %rdx # tmp542,, _6, tmp539
# sha256-crypt.c:329: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
        movslq -484(%rbp), %rdx # buflen,
# sha256-crypt.c:328: cp = __stpncpy (cp, salt, MIN ((size_t) MAX (0, buflen), salt_len));
        movq %rax, -472(%rbp) # _50, cp
# sha256-crypt.c:329: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
        testl %edx, %edx # buflen.23_51
        cmovns %rdx, %r12 #,,
        cmpq %r14, %r12 # _6, tmp546
        cmova %r14, %r12 # tmp546,, _6, tmp543
        subl %r12d, %edx # tmp543, _58
# sha256-crypt.c:331: if (buflen > 0)
        testl %edx, %edx # _58
# sha256-crypt.c:329: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
        movl %edx, -484(%rbp) # _58, buflen
# sha256-crypt.c:331: if (buflen > 0)
# sha256-crypt.c:333: *cp++ = '$';
        leaq 1(%rax), %rdx #, tmp547
        movq %rdx, -472(%rbp) # tmp547, cp
        movb $36, (%rax) #, *_50
# sha256-crypt.c:334: --buflen;
        subl $1, -484(%rbp) #, buflen
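# NOTE: output assembly starts here: "$5$", an optional "rounds=N$", the
# salt, then a '$' separator (the movb $36 above is the '$').  C sketch
# assembled from the quoted source lines 317-334:
#
#   cp = __stpncpy (buffer, sha256_salt_prefix, MAX (0, buflen));
#   buflen -= sizeof (sha256_salt_prefix) - 1;
#   if (rounds_custom)
#     {
#       int n = __snprintf (cp, MAX (0, buflen), "%s%zu$",
#                           sha256_rounds_prefix, rounds);
#       cp += n;
#       buflen -= n;
#     }
#   cp = __stpncpy (cp, salt, MIN ((size_t) MAX (0, buflen), salt_len));
#   buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
#   if (buflen > 0)
#     {
#       *cp++ = '$';
#       --buflen;
#     }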
# sha256-crypt.c:337: __b64_from_24bit (&cp, &buflen,
        movzbl -454(%rbp), %ecx # alt_result, alt_result
        movzbl -464(%rbp), %edx # alt_result, alt_result
        leaq -484(%rbp), %r13 #, tmp551
        movzbl -444(%rbp), %r8d # alt_result,
        leaq -472(%rbp), %r12 #, tmp552
        movq %r13, %rsi # tmp551,
        movq %r12, %rdi # tmp552,
        call __b64_from_24bit@PLT #
# sha256-crypt.c:339: __b64_from_24bit (&cp, &buflen,
        movzbl -463(%rbp), %ecx # alt_result, alt_result
        movzbl -443(%rbp), %edx # alt_result, alt_result
        movzbl -453(%rbp), %r8d # alt_result,
        movq %r13, %rsi # tmp551,
        movq %r12, %rdi # tmp552,
        call __b64_from_24bit@PLT #
# sha256-crypt.c:341: __b64_from_24bit (&cp, &buflen,
        movzbl -442(%rbp), %ecx # alt_result, alt_result
        movzbl -452(%rbp), %edx # alt_result, alt_result
        movzbl -462(%rbp), %r8d # alt_result,
        movq %r13, %rsi # tmp551,
        movq %r12, %rdi # tmp552,
        call __b64_from_24bit@PLT #
# sha256-crypt.c:343: __b64_from_24bit (&cp, &buflen,
        movzbl -451(%rbp), %ecx # alt_result, alt_result
        movzbl -461(%rbp), %edx # alt_result, alt_result
        movzbl -441(%rbp), %r8d # alt_result,
        movq %r13, %rsi # tmp551,
        movq %r12, %rdi # tmp552,
        call __b64_from_24bit@PLT #
# sha256-crypt.c:345: __b64_from_24bit (&cp, &buflen,
        movzbl -460(%rbp), %ecx # alt_result, alt_result
        movzbl -440(%rbp), %edx # alt_result, alt_result
        movzbl -450(%rbp), %r8d # alt_result,
        movq %r13, %rsi # tmp551,
        movq %r12, %rdi # tmp552,
        call __b64_from_24bit@PLT #
# sha256-crypt.c:347: __b64_from_24bit (&cp, &buflen,
        movzbl -439(%rbp), %ecx # alt_result, alt_result
        movzbl -449(%rbp), %edx # alt_result, alt_result
        movzbl -459(%rbp), %r8d # alt_result,
        movq %r13, %rsi # tmp551,
        movq %r12, %rdi # tmp552,
        call __b64_from_24bit@PLT #
# sha256-crypt.c:349: __b64_from_24bit (&cp, &buflen,
        movzbl -448(%rbp), %ecx # alt_result, alt_result
        movzbl -458(%rbp), %edx # alt_result, alt_result
        movzbl -438(%rbp), %r8d # alt_result,
        movq %r13, %rsi # tmp551,
        movq %r12, %rdi # tmp552,
        call __b64_from_24bit@PLT #
# sha256-crypt.c:351: __b64_from_24bit (&cp, &buflen,
        movzbl -457(%rbp), %ecx # alt_result, alt_result
        movzbl -437(%rbp), %edx # alt_result, alt_result
        movzbl -447(%rbp), %r8d # alt_result,
        movq %r13, %rsi # tmp551,
        movq %r12, %rdi # tmp552,
        call __b64_from_24bit@PLT #
# sha256-crypt.c:353: __b64_from_24bit (&cp, &buflen,
        movzbl -436(%rbp), %ecx # alt_result, alt_result
        movzbl -446(%rbp), %edx # alt_result, alt_result
        movzbl -456(%rbp), %r8d # alt_result,
        movq %r13, %rsi # tmp551,
        movq %r12, %rdi # tmp552,
        call __b64_from_24bit@PLT #
# sha256-crypt.c:355: __b64_from_24bit (&cp, &buflen,
        movzbl -445(%rbp), %ecx # alt_result, alt_result
        movzbl -455(%rbp), %edx # alt_result, alt_result
        movzbl -435(%rbp), %r8d # alt_result,
        movq %r13, %rsi # tmp551,
        movq %r12, %rdi # tmp552,
        call __b64_from_24bit@PLT #
# sha256-crypt.c:357: __b64_from_24bit (&cp, &buflen,
        movzbl -433(%rbp), %ecx # alt_result, alt_result
        movzbl -434(%rbp), %r8d # alt_result,
        movq %r13, %rsi # tmp551,
        movq %r12, %rdi # tmp552,
        call __b64_from_24bit@PLT #
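# NOTE: the 43 hash characters come from these eleven calls; each encodes
# three digest bytes taken in SHA-crypt's shuffled order (alt_result sits
# at -464(%rbp), so the -464/-454/-444 operands above are bytes 0/10/20,
# and so on).  Assumed contract of the helper, inferred from its use here:
#
#   extern void __b64_from_24bit (char **cp, int *buflen,
#                                 unsigned int b2, unsigned int b1,
#                                 unsigned int b0, int n);
#   /* packs (b2 << 16) | (b1 << 8) | b0 and emits n characters of the
#      ./0-9A-Za-z crypt alphabet, advancing *cp and decrementing *buflen;
#      ten calls emit 4 characters each, the last emits 3: 43 in total.  */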
# sha256-crypt.c:359: if (buflen <= 0)
        movl -484(%rbp), %eax # buflen,
# sha256-crypt.c:365: *cp = '\0'; /* Terminate the string. */
        movq -472(%rbp), %rax # cp, cp.31_127
        movq -560(%rbp), %r12 # %sfp, <retval>
        movb $0, (%rax) #, *cp.31_127
# sha256-crypt.c:372: __sha256_init_ctx (&ctx);
        movq %rbx, %rdi # tmp624,
        call __sha256_init_ctx@PLT #
# sha256-crypt.c:373: __sha256_finish_ctx (&ctx, alt_result);
        movq -504(%rbp), %rsi # %sfp,
        movq %rbx, %rdi # tmp624,
        call __sha256_finish_ctx@PLT #
# sha256-crypt.c:374: explicit_bzero (&ctx, sizeof (ctx));
        movq %rbx, %rdi # tmp624,
        call __explicit_bzero_chk@PLT #
# sha256-crypt.c:375: explicit_bzero (&alt_ctx, sizeof (alt_ctx));
        movq -520(%rbp), %rdi # %sfp,
        call __explicit_bzero_chk@PLT #
# sha256-crypt.c:377: explicit_bzero (temp_result, sizeof (temp_result));
        movq -544(%rbp), %rdi # %sfp,
        call __explicit_bzero_chk@PLT #
# sha256-crypt.c:378: explicit_bzero (p_bytes, key_len);
        movq -496(%rbp), %rdi # %sfp,
        movq %r15, %rsi # tmp286,
        call __explicit_bzero_chk@PLT #
# sha256-crypt.c:379: explicit_bzero (s_bytes, salt_len);
        movq -536(%rbp), %rdi # %sfp,
        movq %r14, %rsi # _6,
        call __explicit_bzero_chk@PLT #
# sha256-crypt.c:380: if (copied_key != NULL)
        movq -584(%rbp), %rax # %sfp, copied_key
        testq %rax, %rax # copied_key
# sha256-crypt.c:381: explicit_bzero (copied_key, key_len);
        movq %r15, %rsi # tmp286,
        movq %rax, %rdi # copied_key,
        call __explicit_bzero_chk@PLT #
# sha256-crypt.c:382: if (copied_salt != NULL)
        movq -568(%rbp), %rax # %sfp, copied_salt
        testq %rax, %rax # copied_salt
# sha256-crypt.c:383: explicit_bzero (copied_salt, salt_len);
        movq %r14, %rsi # _6,
        movq %rax, %rdi # copied_salt,
        call __explicit_bzero_chk@PLT #
# sha256-crypt.c:385: free (free_key);
        movq -576(%rbp), %rdi # %sfp,
# sha256-crypt.c:386: free (free_pbytes);
        movq -592(%rbp), %rdi # %sfp,
# sha256-crypt.c:388: }
        leaq -40(%rbp), %rsp #,
        movq %r12, %rax # <retval>,
# sha256-crypt.c:277: memcpy (cp, temp_result, cnt);
        movl (%rax), %edx #, tmp477
        movl %edx, (%rsi) # tmp477,* s_bytes
        movl $4, %edx #, tmp475
        movzbl (%rax,%rdx), %eax #, tmp483
        movb %al, (%rsi,%rdx) # tmp483,
        movzwl (%rax,%rdx), %edi #, tmp480
        movw %di, (%rsi,%rdx) # tmp480,
        addq $2, %rdx #, tmp475
        movl %r14d, %esi # _6, tmp471
        xorl %eax, %eax # tmp470
        movq %r13, %r8 # tmp627, tmp627
        andl $-8, %esi #, tmp471
        movl %eax, %edx # tmp470, tmp472
        movq -536(%rbp), %r10 # %sfp, tmp465
        movq (%r8,%rdx), %rdi # MEM[(void *)&temp_result], tmp473
        cmpl %esi, %eax # tmp471, tmp470
        movq %rdi, (%r10,%rdx) # tmp473, MEM[(void *)s_bytes_233]
        movq %r10, %rsi # tmp465, tmp465
        addq %rax, %rsi # tmp474, s_bytes
        addq -544(%rbp), %rax # %sfp, tmp469
# sha256-crypt.c:361: __set_errno (ERANGE);
        movq errno@gottpoff(%rip), %rax #, tmp602
# sha256-crypt.c:362: buffer = NULL;
        xorl %r12d, %r12d # <retval>
# sha256-crypt.c:361: __set_errno (ERANGE);
        movl $34, %fs:(%rax) #, errno
# sha256-crypt.c:322: int n = __snprintf (cp, MAX (0, buflen), "%s%zu$",
        testl %edx, %edx # _37
        movq -528(%rbp), %r8 # %sfp,
        leaq sha256_rounds_prefix(%rip), %rcx #,
        cmovs %r12d, %edx # _37,, tmp530, tmp533
        movslq %edx, %rsi # tmp533, tmp535
        leaq .LC1(%rip), %rdx #,
        call __snprintf@PLT #
# sha256-crypt.c:325: buflen -= n;
        movl -484(%rbp), %edx # buflen, _37
# sha256-crypt.c:324: cp += n;
        movslq %eax, %rdi # n, n
        addq -472(%rbp), %rdi # cp, _33
# sha256-crypt.c:325: buflen -= n;
        subl %eax, %edx # n, _37
# sha256-crypt.c:324: cp += n;
        movq %rdi, -472(%rbp) # _33, cp
# sha256-crypt.c:325: buflen -= n;
        movl %edx, -484(%rbp) # _37, buflen
# sha256-crypt.c:132: const char *num = salt + sizeof (sha256_rounds_prefix) - 1;
        movq -512(%rbp), %rax # %sfp, salt
# sha256-crypt.c:134: unsigned long int srounds = strtoul (num, &endp, 10);
        leaq -224(%rbp), %rsi #, tmp281
# sha256-crypt.c:132: const char *num = salt + sizeof (sha256_rounds_prefix) - 1;
        leaq 7(%rax), %rdi #, num
# sha256-crypt.c:134: unsigned long int srounds = strtoul (num, &endp, 10);
# sha256-crypt.c:135: if (*endp == '$')
        movq -224(%rbp), %rdx # endp, endp.0_3
        cmpb $36, (%rdx) #, *endp.0_3
# sha256-crypt.c:138: rounds = MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX));
        cmpq $999999999, %rax #, srounds
# sha256-crypt.c:137: salt = endp + 1;
        leaq 1(%rdx), %rcx #, salt
# sha256-crypt.c:138: rounds = MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX));
        movl $999999999, %edx #, tmp283
        cmovbe %rax, %rdx # srounds,, tmp283
        movl $1000, %eax #, tmp284
# sha256-crypt.c:139: rounds_custom = true;
        movb $1, -485(%rbp) #, %sfp
        cmpq $1000, %rdx #, rounds
# sha256-crypt.c:137: salt = endp + 1;
        movq %rcx, -512(%rbp) # salt, %sfp
        cmovnb %rdx, %rax # rounds,, tmp284
        movq %rax, -528(%rbp) # tmp284, %sfp
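# NOTE: this out-of-line block is the "rounds=" parser branched to from the
# prefix checks near the top.  The immediates give the clamp bounds
# (ROUNDS_MIN = 1000, ROUNDS_MAX = 999999999).  C sketch:
#
#   const char *num = salt + sizeof (sha256_rounds_prefix) - 1;  /* +7 */
#   char *endp;
#   unsigned long int srounds = strtoul (num, &endp, 10);
#   if (*endp == '$')
#     {
#       salt = endp + 1;
#       rounds = MAX (1000, MIN (srounds, 999999999UL));
#       rounds_custom = true;
#     }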
# sha256-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
        leaq 34(%r14), %rax #, tmp316
# sha256-crypt.c:169: alloca_used += salt_len + __alignof__ (uint32_t);
        movq -496(%rbp), %rcx # %sfp, alloca_used
# sha256-crypt.c:170: salt = copied_salt =
        movl %r14d, %edx # _6,
# sha256-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
        andq $-16, %rax #, tmp320
        subq %rax, %rsp # tmp320,
# sha256-crypt.c:169: alloca_used += salt_len + __alignof__ (uint32_t);
        leaq 4(%r14,%rcx), %rcx #, alloca_used
# sha256-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
        leaq 15(%rsp), %rax #, tmp322
# sha256-crypt.c:169: alloca_used += salt_len + __alignof__ (uint32_t);
        movq %rcx, -496(%rbp) # alloca_used, %sfp
# sha256-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
        andq $-16, %rax #, tmp324
# sha256-crypt.c:170: salt = copied_salt =
# sha256-crypt.c:172: - (tmp - (char *) 0) % __alignof__ (uint32_t),
        leaq 4(%rax), %rcx #, tmp325
# sha256-crypt.c:170: salt = copied_salt =
        testb $4, %r14b #, _6
        testl %edx, %edx # _6
        movq -512(%rbp), %rbx # %sfp, salt
        movzbl (%rbx), %esi #* salt, tmp338
        movb %sil, 4(%rax) # tmp338,
        movq %rcx, -512(%rbp) # salt, %sfp
        movq %rcx, -568(%rbp) # salt, %sfp
        movq -512(%rbp), %rbx # %sfp, salt
        addq $8, %rax #, tmp355
        movq (%rbx), %rdx #* salt, tmp347
        movq %rdx, -4(%rax) # tmp347,
        movl %r14d, %edx # _6, _6
        movq -8(%rbx,%rdx), %rsi #, tmp354
        movq %rsi, -8(%rcx,%rdx) # tmp354,
        movq %rcx, %rdx # tmp325, tmp327
        subq %rax, %rdx # tmp355, tmp327
        subq %rdx, %rbx # tmp327, salt
        addl %r14d, %edx # _6, _6
        andl $-8, %edx #, _6
        movq %rbx, %r8 # salt, salt
        andl $-8, %edx #, tmp357
        xorl %esi, %esi # tmp356
        movl %esi, %edi # tmp356, tmp358
        addl $8, %esi #, tmp356
        movq (%r8,%rdi), %r9 #, tmp359
        cmpl %edx, %esi # tmp357, tmp356
        movq %r9, (%rax,%rdi) # tmp359,
# sha256-crypt.c:261: memcpy (cp, temp_result, cnt);
        movq -544(%rbp), %rsi # %sfp, tmp627
        movl (%rsi), %edx #, tmp417
        movl %edx, (%rcx) # tmp417,* _309
        movl %eax, %edx # cnt, cnt
        movl -4(%rsi,%rdx), %eax #, tmp424
        movl %eax, -4(%rcx,%rdx) # tmp424,
# sha256-crypt.c:223: sha256_process_bytes (alt_result, cnt, &ctx, nss_ctx);
        movq -504(%rbp), %rdi # %sfp,
        movq %rbx, %rdx # tmp624,
        movq %r15, %rsi # tmp286,
        call __sha256_process_bytes@PLT #
# sha256-crypt.c:227: for (cnt = key_len; cnt > 0; cnt >>= 1)
        testq %r15, %r15 # tmp286
# sha256-crypt.c:234: sha256_finish_ctx (&ctx, nss_ctx, alt_result);
        movq -504(%rbp), %rsi # %sfp,
        movq %rbx, %rdi # tmp624,
        call __sha256_finish_ctx@PLT #
# sha256-crypt.c:237: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
        movq -520(%rbp), %rdi # %sfp,
        call __sha256_init_ctx@PLT #
# sha256-crypt.c:259: for (cnt = key_len; cnt >= 32; cnt -= 32)
        movq -496(%rbp), %rcx # %sfp, _309
        movq %r15, %rdx # tmp286, cnt
# sha256-crypt.c:261: memcpy (cp, temp_result, cnt);
        movl %eax, %edx # cnt, cnt
        movq -544(%rbp), %rax # %sfp, tmp627
        movzwl -2(%rax,%rdx), %eax #, tmp433
        movw %ax, -2(%rcx,%rdx) # tmp433,
# sha256-crypt.c:251: free_pbytes = cp = p_bytes = (char *)malloc (key_len);
        movq %r15, %rdi # tmp286,
# sha256-crypt.c:252: if (free_pbytes == NULL)
        testq %rax, %rax # p_bytes
# sha256-crypt.c:251: free_pbytes = cp = p_bytes = (char *)malloc (key_len);
        movq %rax, -496(%rbp) # p_bytes, %sfp
        movq %rax, -472(%rbp) # p_bytes, cp
# sha256-crypt.c:252: if (free_pbytes == NULL)
        movq %rax, -592(%rbp) # p_bytes, %sfp
# sha256-crypt.c:170: salt = copied_salt =
        movq -512(%rbp), %rbx # %sfp, salt
        movl (%rbx), %esi #* salt, tmp330
        movl %esi, 4(%rax) # tmp330,
        movl -4(%rbx,%rdx), %eax #, tmp337
        movl %eax, -4(%rcx,%rdx) # tmp337,
        movq -512(%rbp), %rax # %sfp, salt
        movzwl -2(%rax,%rdx), %eax #, tmp346
        movw %ax, -2(%rcx,%rdx) # tmp346,
# sha256-crypt.c:154: free_key = tmp = (char *) malloc (key_len + __alignof__ (uint32_t));
        movq %rbx, %rdi # _9,
# sha256-crypt.c:155: if (tmp == NULL)
        testq %rax, %rax # free_key
# sha256-crypt.c:154: free_key = tmp = (char *) malloc (key_len + __alignof__ (uint32_t));
        movq %rax, -576(%rbp) # free_key, %sfp
# sha256-crypt.c:155: if (tmp == NULL)
        movq %rax, %rdi # free_key, m__
# sha256-crypt.c:119: size_t alloca_used = 0;
        movq $0, -496(%rbp) #, %sfp
# sha256-crypt.c:254: free (free_key);
        movq -576(%rbp), %rdi # %sfp,
# sha256-crypt.c:255: return NULL;
        xorl %r12d, %r12d # <retval>
# sha256-crypt.c:254: free (free_key);
# sha256-crypt.c:255: return NULL;
# sha256-crypt.c:156: return NULL;
        xorl %r12d, %r12d # <retval>
        .size __sha256_crypt_r, .-__sha256_crypt_r
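# NOTE: a usage sketch for the reentrant entry point just closed above
# (prototype inferred from the register assignments at function entry;
# the salt string and buffer size are illustrative):
#
#   char out[80];
#   char *hash = __sha256_crypt_r ("password",
#                                  "$5$rounds=10000$saltstring",
#                                  out, sizeof out);
#   /* success: "$5$rounds=10000$saltstring$" + 43 hash characters;
#      failure: NULL, with errno set (ERANGE when the buffer is short) */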
        .globl __sha256_crypt
        .type __sha256_crypt, @function
        .cfi_def_cfa_offset 16
        .cfi_def_cfa_offset 24
        movq %rdi, %r12 # key, key
        .cfi_def_cfa_offset 32
# sha256-crypt.c:407: + strlen (salt) + 1 + 43 + 1);
        movq %rsi, %rdi # salt,
# sha256-crypt.c:399: {
        movq %rsi, %rbp # salt, salt
# sha256-crypt.c:407: + strlen (salt) + 1 + 43 + 1);
# sha256-crypt.c:409: if (buflen < needed)
        movl buflen.5422(%rip), %ecx # buflen, buflen.33_4
# sha256-crypt.c:407: + strlen (salt) + 1 + 43 + 1);
        leal 66(%rax), %ebx #, needed
        movq buffer(%rip), %rdx # buffer, <retval>
# sha256-crypt.c:409: if (buflen < needed)
        cmpl %ebx, %ecx # needed, buflen.33_4
# sha256-crypt.c:411: char *new_buffer = (char *) realloc (buffer, needed);
        movq %rdx, %rdi # <retval>,
        movslq %ebx, %rsi # needed, needed
# sha256-crypt.c:412: if (new_buffer == NULL)
        testq %rax, %rax # <retval>
# sha256-crypt.c:411: char *new_buffer = (char *) realloc (buffer, needed);
        movq %rax, %rdx #, <retval>
# sha256-crypt.c:412: if (new_buffer == NULL)
# sha256-crypt.c:415: buffer = new_buffer;
        movq %rax, buffer(%rip) # <retval>, buffer
# sha256-crypt.c:416: buflen = needed;
        movl %ebx, buflen.5422(%rip) # needed, buflen
        movl %ebx, %ecx # needed, buflen.33_4
# sha256-crypt.c:420: }
        .cfi_def_cfa_offset 24
# sha256-crypt.c:419: return __sha256_crypt_r (key, salt, buffer, buflen);
        movq %rbp, %rsi # salt,
        movq %r12, %rdi # key,
# sha256-crypt.c:420: }
        .cfi_def_cfa_offset 16
        .cfi_def_cfa_offset 8
# sha256-crypt.c:419: return __sha256_crypt_r (key, salt, buffer, buflen);
        jmp __sha256_crypt_r@PLT #
# sha256-crypt.c:420: }
        .cfi_def_cfa_offset 24
        .cfi_def_cfa_offset 16
        .cfi_def_cfa_offset 8
        .size __sha256_crypt, .-__sha256_crypt
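# NOTE: __sha256_crypt is the non-reentrant wrapper: it grows a static
# buffer (kept in __libc_freeres_ptrs so it is released at shutdown) and
# tail-calls __sha256_crypt_r.  C sketch; the leal 66(%rax) above is the
# folded size formula from the quoted source lines 407-416:
#
#   static char *buffer;       /* in __libc_freeres_ptrs */
#   static int buflen;         /* .comm buflen.5422 below */
#   char *__sha256_crypt (const char *key, const char *salt)
#   {
#     int needed = 66 + strlen (salt);  /* "$5$" + rounds + '$' + 43 + NUL */
#     if (buflen < needed)
#       {
#         char *new_buffer = realloc (buffer, needed);
#         if (new_buffer == NULL)
#           return NULL;
#         buffer = new_buffer;
#         buflen = needed;
#       }
#     return __sha256_crypt_r (key, salt, buffer, buflen);
#   }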
        .comm buflen.5422,4,4
        .section __libc_freeres_ptrs
        .type buffer, @object
        .section .rodata.str1.8,"aMS",@progbits,1
        .type sha256_rounds_prefix, @object
        .size sha256_rounds_prefix, 8
sha256_rounds_prefix:
        .section .rodata.str1.1
        .type sha256_salt_prefix, @object
        .size sha256_salt_prefix, 4
        .ident "GCC: (GNU) 7.3.0"
        .section .note.GNU-stack,"",@progbits