2 # GNU C11 (GCC) version 7.3.0 (x86_64-nyan-linux-gnu)
3 # compiled by GNU C version 7.3.0, GMP version 6.1.2, MPFR version 4.0.1, MPC version 1.1.0, isl version none
4 # GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072
5 # options passed: -I ../include
6 # -I /root/wip/nyanglibc/builds/0/nyanglibc/build/crypt
7 # -I /root/wip/nyanglibc/builds/0/nyanglibc/build
8 # -I ../sysdeps/unix/sysv/linux/x86_64/64
9 # -I ../sysdeps/unix/sysv/linux/x86_64
10 # -I ../sysdeps/unix/sysv/linux/x86/include
11 # -I ../sysdeps/unix/sysv/linux/x86 -I ../sysdeps/x86/nptl
12 # -I ../sysdeps/unix/sysv/linux/wordsize-64 -I ../sysdeps/x86_64/nptl
13 # -I ../sysdeps/unix/sysv/linux/include -I ../sysdeps/unix/sysv/linux
14 # -I ../sysdeps/nptl -I ../sysdeps/pthread -I ../sysdeps/gnu
15 # -I ../sysdeps/unix/inet -I ../sysdeps/unix/sysv -I ../sysdeps/unix/x86_64
16 # -I ../sysdeps/unix -I ../sysdeps/posix -I ../sysdeps/x86_64/64
17 # -I ../sysdeps/x86_64/fpu -I ../sysdeps/x86/fpu -I ../sysdeps/x86_64
18 # -I ../sysdeps/x86/include -I ../sysdeps/x86
19 # -I ../sysdeps/ieee754/float128 -I ../sysdeps/ieee754/ldbl-96/include
20 # -I ../sysdeps/ieee754/ldbl-96 -I ../sysdeps/ieee754/dbl-64
21 # -I ../sysdeps/ieee754/flt-32 -I ../sysdeps/wordsize-64
22 # -I ../sysdeps/ieee754 -I ../sysdeps/generic -I .. -I ../libio -I .
23 # -MD /run/asm/crypt/sha512-crypt.v.d -MF /run/asm/crypt/sha512-crypt.o.dt
24 # -MP -MT /run/asm/crypt/.o -D _LIBC_REENTRANT -D MODULE_NAME=libcrypt
25 # -D PIC -D TOP_NAMESPACE=glibc
26 # -include /root/wip/nyanglibc/builds/0/nyanglibc/build/libc-modules.h
27 # -include ../include/libc-symbols.h sha512-crypt.c -mtune=generic
28 # -march=x86-64 -auxbase-strip /run/asm/crypt/sha512-crypt.v.s -O2 -Wall
29 # -Wwrite-strings -Wundef -Werror -Wstrict-prototypes
30 # -Wold-style-definition -std=gnu11 -fverbose-asm -fgnu89-inline
31 # -fmerge-all-constants -frounding-math -fno-stack-protector -fmath-errno
32 # -fpie -ftls-model=initial-exec
33 # options enabled: -faggressive-loop-optimizations -falign-labels
34 # -fasynchronous-unwind-tables -fauto-inc-dec -fbranch-count-reg
35 # -fcaller-saves -fchkp-check-incomplete-type -fchkp-check-read
36 # -fchkp-check-write -fchkp-instrument-calls -fchkp-narrow-bounds
37 # -fchkp-optimize -fchkp-store-bounds -fchkp-use-static-bounds
38 # -fchkp-use-static-const-bounds -fchkp-use-wrappers -fcode-hoisting
39 # -fcombine-stack-adjustments -fcommon -fcompare-elim -fcprop-registers
40 # -fcrossjumping -fcse-follow-jumps -fdefer-pop
41 # -fdelete-null-pointer-checks -fdevirtualize -fdevirtualize-speculatively
42 # -fdwarf2-cfi-asm -fearly-inlining -feliminate-unused-debug-types
43 # -fexpensive-optimizations -fforward-propagate -ffp-int-builtin-inexact
44 # -ffunction-cse -fgcse -fgcse-lm -fgnu-runtime -fgnu-unique
45 # -fguess-branch-probability -fhoist-adjacent-loads -fident -fif-conversion
46 # -fif-conversion2 -findirect-inlining -finline -finline-atomics
47 # -finline-functions-called-once -finline-small-functions -fipa-bit-cp
48 # -fipa-cp -fipa-icf -fipa-icf-functions -fipa-icf-variables -fipa-profile
49 # -fipa-pure-const -fipa-ra -fipa-reference -fipa-sra -fipa-vrp
50 # -fira-hoist-pressure -fira-share-save-slots -fira-share-spill-slots
51 # -fisolate-erroneous-paths-dereference -fivopts -fkeep-static-consts
52 # -fleading-underscore -flifetime-dse -flra-remat -flto-odr-type-merging
53 # -fmath-errno -fmerge-all-constants -fmerge-debug-strings
54 # -fmove-loop-invariants -fomit-frame-pointer -foptimize-sibling-calls
55 # -foptimize-strlen -fpartial-inlining -fpeephole -fpeephole2 -fpic -fpie
56 # -fplt -fprefetch-loop-arrays -free -freg-struct-return -freorder-blocks
57 # -freorder-functions -frerun-cse-after-loop -frounding-math
58 # -fsched-critical-path-heuristic -fsched-dep-count-heuristic
59 # -fsched-group-heuristic -fsched-interblock -fsched-last-insn-heuristic
60 # -fsched-rank-heuristic -fsched-spec -fsched-spec-insn-heuristic
61 # -fsched-stalled-insns-dep -fschedule-fusion -fschedule-insns2
62 # -fsemantic-interposition -fshow-column -fshrink-wrap
63 # -fshrink-wrap-separate -fsigned-zeros -fsplit-ivs-in-unroller
64 # -fsplit-wide-types -fssa-backprop -fssa-phiopt -fstdarg-opt
65 # -fstore-merging -fstrict-aliasing -fstrict-overflow
66 # -fstrict-volatile-bitfields -fsync-libcalls -fthread-jumps
67 # -ftoplevel-reorder -ftrapping-math -ftree-bit-ccp -ftree-builtin-call-dce
68 # -ftree-ccp -ftree-ch -ftree-coalesce-vars -ftree-copy-prop -ftree-cselim
69 # -ftree-dce -ftree-dominator-opts -ftree-dse -ftree-forwprop -ftree-fre
70 # -ftree-loop-if-convert -ftree-loop-im -ftree-loop-ivcanon
71 # -ftree-loop-optimize -ftree-parallelize-loops= -ftree-phiprop -ftree-pre
72 # -ftree-pta -ftree-reassoc -ftree-scev-cprop -ftree-sink -ftree-slsr
73 # -ftree-sra -ftree-switch-conversion -ftree-tail-merge -ftree-ter
74 # -ftree-vrp -funit-at-a-time -funwind-tables -fverbose-asm
75 # -fzero-initialized-in-bss -m128bit-long-double -m64 -m80387
76 # -malign-stringops -mavx256-split-unaligned-load
77 # -mavx256-split-unaligned-store -mfancy-math-387 -mfp-ret-in-387 -mfxsr
78 # -mglibc -mieee-fp -mlong-double-80 -mmmx -mno-sse4 -mpush-args -mred-zone
79 # -msse -msse2 -mstv -mtls-direct-seg-refs -mvzeroupper
83 .section __libc_freeres_ptrs, "aw", %nobits
85 .section .rodata.str1.1,"aMS",@progbits,1
93 .globl __sha512_crypt_r
94 .type __sha512_crypt_r, @function
99 .cfi_def_cfa_offset 16
102 .cfi_def_cfa_register 6
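# __sha512_crypt_r (key, salt, buffer, buflen) is the reentrant SHA-512 crypt(3)
# implementation.  Broad flow, per the sha512-crypt.c lines cited in the comments
# below: validate the "$6$" salt prefix and an optional "rounds=<N>$" field, hash
# key and salt into two SHA-512 contexts (ctx at -752(%rbp), alt_ctx at -400(%rbp)),
# derive the P and S byte sequences, run the rounds loop, and write
# "$6$[rounds=N$]salt$<86-char base64 digest>" into buffer.
# C prototype, for reference:
#   char *__sha512_crypt_r (const char *key, const char *salt,
#                           char *buffer, int buflen);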
111 movq %rsi, %r15 # salt, salt
114 movq %rdi, %rbx # key, key
115 # sha512-crypt.c:125: if (strncmp (sha512_salt_prefix, salt, sizeof (sha512_salt_prefix) - 1) == 0)
116 leaq sha512_salt_prefix(%rip), %rdi #,
117 # sha512-crypt.c:103: {
119 # sha512-crypt.c:103: {
120 movq %rdx, -968(%rbp) # buffer, %sfp
121 # sha512-crypt.c:125: if (strncmp (sha512_salt_prefix, salt, sizeof (sha512_salt_prefix) - 1) == 0)
123 # sha512-crypt.c:103: {
124 movl %ecx, -900(%rbp) # buflen, buflen
125 # sha512-crypt.c:125: if (strncmp (sha512_salt_prefix, salt, sizeof (sha512_salt_prefix) - 1) == 0)
127 # sha512-crypt.c:127: salt += sizeof (sha512_salt_prefix) - 1;
128 leaq 3(%r15), %rdx #, tmp747
129 testl %eax, %eax # _1
130 # sha512-crypt.c:129: if (strncmp (salt, sha512_rounds_prefix, sizeof (sha512_rounds_prefix) - 1)
131 leaq sha512_rounds_prefix(%rip), %rsi #,
132 # sha512-crypt.c:127: salt += sizeof (sha512_salt_prefix) - 1;
133 cmovne %r15, %rdx # tmp747,, salt, tmp747
134 movq %rdx, %rdi # tmp747, salt
135 movq %rdx, -944(%rbp) # salt, %sfp
136 # sha512-crypt.c:129: if (strncmp (salt, sha512_rounds_prefix, sizeof (sha512_rounds_prefix) - 1)
139 testl %eax, %eax # _2
140 # sha512-crypt.c:118: bool rounds_custom = false;
141 movb $0, -901(%rbp) #, %sfp
142 # sha512-crypt.c:117: size_t rounds = ROUNDS_DEFAULT;
143 movq $5000, -936(%rbp) #, %sfp
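# Defaults set above: rounds_custom = false (byte at -901(%rbp)) and
# rounds = ROUNDS_DEFAULT = 5000 (quadword at -936(%rbp)); a "rounds=<N>$"
# field in the salt overrides them in the strtoul block further down.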
144 # sha512-crypt.c:129: if (strncmp (salt, sha512_rounds_prefix, sizeof (sha512_rounds_prefix) - 1)
147 # sha512-crypt.c:143: salt_len = MIN (strcspn (salt, "$"), SALT_LEN_MAX);
148 movq -944(%rbp), %rdi # %sfp,
149 leaq .LC0(%rip), %rsi #,
152 movq %rax, %rcx #, _6
153 movl $16, %eax #, tmp749
154 cmovb %rcx, %rax # _6,, tmp749
155 # sha512-crypt.c:144: key_len = strlen (key);
156 movq %rbx, %rdi # key,
157 # sha512-crypt.c:143: salt_len = MIN (strcspn (salt, "$"), SALT_LEN_MAX);
158 movq %rax, -920(%rbp) # tmp749, %sfp
159 # sha512-crypt.c:144: key_len = strlen (key);
161 # sha512-crypt.c:146: if ((key - (char *) 0) % __alignof__ (uint64_t) != 0)
163 # sha512-crypt.c:144: key_len = strlen (key);
164 movq %rax, %r14 #, tmp350
165 # sha512-crypt.c:146: if ((key - (char *) 0) % __alignof__ (uint64_t) != 0)
167 # sha512-crypt.c:150: if (__libc_use_alloca (alloca_used + key_len + __alignof__ (uint64_t)))
168 leaq 8(%rax), %r12 #, _9
169 # ../sysdeps/pthread/allocalim.h:27: return (__glibc_likely (__libc_alloca_cutoff (size))
170 movq %r12, %rdi # _9,
171 call __libc_alloca_cutoff@PLT #
172 # ../sysdeps/pthread/allocalim.h:29: || __glibc_likely (size <= PTHREAD_STACK_MIN / 4)
173 testl %eax, %eax # _377
175 cmpq $4096, %r12 #, _9
178 # sha512-crypt.c:151: tmp = alloca_account (key_len + __alignof__ (uint64_t), alloca_used);
180 # 151 "sha512-crypt.c" 1
184 addq $30, %r12 #, tmp361
185 andq $-16, %r12 #, tmp365
186 subq %r12, %rsp # tmp365,
187 leaq 15(%rsp), %rdi #, tmp367
188 andq $-16, %rdi #, m__
190 # 151 "sha512-crypt.c" 1
191 sub %rsp, %rax # d__
194 addq %r14, %rax # tmp350, prephitmp_391
195 # sha512-crypt.c:120: char *free_key = NULL;
196 movq $0, -984(%rbp) #, %sfp
197 movq %rax, -912(%rbp) # prephitmp_391, %sfp
199 # sha512-crypt.c:159: key = copied_key =
200 movq %rbx, %rsi # key,
201 # sha512-crypt.c:161: - (tmp - (char *) 0) % __alignof__ (uint64_t),
202 addq $8, %rdi #, tmp371
203 # sha512-crypt.c:159: key = copied_key =
204 movq %r14, %rdx # tmp350,
206 movq %rax, %rbx #, key
207 movq %rax, -992(%rbp) # key, %sfp
212 # sha512-crypt.c:144: key_len = strlen (key);
213 movq %rax, -912(%rbp) # tmp350, %sfp
214 # sha512-crypt.c:120: char *free_key = NULL;
215 movq $0, -984(%rbp) #, %sfp
216 # sha512-crypt.c:112: char *copied_key = NULL;
217 movq $0, -992(%rbp) #, %sfp
219 # sha512-crypt.c:166: if ((salt - (char *) 0) % __alignof__ (uint64_t) != 0)
220 testb $7, -944(%rbp) #, %sfp
221 # sha512-crypt.c:113: char *copied_salt = NULL;
222 movq $0, -976(%rbp) #, %sfp
223 # sha512-crypt.c:166: if ((salt - (char *) 0) % __alignof__ (uint64_t) != 0)
226 # sha512-crypt.c:192: sha512_init_ctx (&ctx, nss_ctx);
227 leaq -752(%rbp), %r12 #, tmp745
228 movq %r12, %rdi # tmp745,
229 call __sha512_init_ctx@PLT #
230 # sha512-crypt.c:195: sha512_process_bytes (key, key_len, &ctx, nss_ctx);
231 movq %r12, %rdx # tmp745,
232 movq %r14, %rsi # tmp350,
233 movq %rbx, %rdi # key,
234 call __sha512_process_bytes@PLT #
235 # sha512-crypt.c:199: sha512_process_bytes (salt, salt_len, &ctx, nss_ctx);
236 movq -944(%rbp), %r15 # %sfp, salt
237 movq -920(%rbp), %rsi # %sfp,
238 movq %r12, %rdx # tmp745,
239 movq %r15, %rdi # salt,
240 call __sha512_process_bytes@PLT #
241 # sha512-crypt.c:204: sha512_init_ctx (&alt_ctx, nss_alt_ctx);
242 leaq -400(%rbp), %rax #, tmp746
243 movq %rax, %r13 # tmp746, tmp746
244 movq %rax, %rdi # tmp746,
245 movq %rax, -928(%rbp) # tmp746, %sfp
246 call __sha512_init_ctx@PLT #
247 # sha512-crypt.c:207: sha512_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
248 movq %r13, %rdx # tmp746,
249 movq %r14, %rsi # tmp350,
250 movq %rbx, %rdi # key,
251 call __sha512_process_bytes@PLT #
252 # sha512-crypt.c:210: sha512_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
253 movq -920(%rbp), %rsi # %sfp,
254 movq %r15, %rdi # salt,
255 movq %r13, %rdx # tmp746,
256 # sha512-crypt.c:217: sha512_finish_ctx (&alt_ctx, nss_alt_ctx, alt_result);
257 leaq -880(%rbp), %r15 #, tmp743
258 # sha512-crypt.c:210: sha512_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
259 call __sha512_process_bytes@PLT #
260 # sha512-crypt.c:213: sha512_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
261 movq %r13, %rdx # tmp746,
262 movq %r14, %rsi # tmp350,
263 movq %rbx, %rdi # key,
264 call __sha512_process_bytes@PLT #
265 # sha512-crypt.c:217: sha512_finish_ctx (&alt_ctx, nss_alt_ctx, alt_result);
266 movq %r15, %rsi # tmp743,
267 movq %r13, %rdi # tmp746,
268 call __sha512_finish_ctx@PLT #
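# alt_ctx has now absorbed key, salt and key again and been finished into the
# 64-byte alt_result at -880(%rbp) (digest B of the scheme); ctx still holds
# key and salt and is extended below with blocks of alt_result driven by key_len.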
269 # sha512-crypt.c:220: for (cnt = key_len; cnt > 64; cnt -= 64)
270 cmpq $64, %r14 #, tmp350
272 leaq -65(%r14), %rax #, _201
273 leaq -64(%r14), %rcx #, _191
274 movq %rbx, -1000(%rbp) # key, %sfp
275 movq %r14, %rbx # cnt, cnt
276 movq %rax, -960(%rbp) # _201, %sfp
277 andq $-64, %rax #, tmp433
278 movq %rcx, -952(%rbp) # _191, %sfp
279 subq %rax, %rcx # tmp433, _192
280 movq %rcx, %r13 # _192, _192
284 # sha512-crypt.c:221: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
285 movq %r12, %rdx # tmp745,
287 movq %r15, %rdi # tmp743,
288 # sha512-crypt.c:220: for (cnt = key_len; cnt > 64; cnt -= 64)
289 subq $64, %rbx #, cnt
290 # sha512-crypt.c:221: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
291 call __sha512_process_bytes@PLT #
292 # sha512-crypt.c:220: for (cnt = key_len; cnt > 64; cnt -= 64)
293 cmpq %rbx, %r13 # cnt, _192
295 # sha512-crypt.c:222: sha512_process_bytes (alt_result, cnt, &ctx, nss_ctx);
296 movq -960(%rbp), %rax # %sfp, _201
297 movq -952(%rbp), %rsi # %sfp, _191
298 movq %r12, %rdx # tmp745,
299 movq %r15, %rdi # tmp743,
300 movq -1000(%rbp), %rbx # %sfp, key
301 andq $-64, %rax #, _201
302 subq %rax, %rsi # tmp439, _191
303 call __sha512_process_bytes@PLT #
305 # sha512-crypt.c:220: for (cnt = key_len; cnt > 64; cnt -= 64)
306 movq %r14, %r13 # tmp350, cnt
311 # sha512-crypt.c:228: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
313 movq %r15, %rdi # tmp743,
314 call __sha512_process_bytes@PLT #
315 # sha512-crypt.c:226: for (cnt = key_len; cnt > 0; cnt >>= 1)
319 # sha512-crypt.c:227: if ((cnt & 1) != 0)
320 testb $1, %r13b #, cnt
321 # sha512-crypt.c:228: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
322 movq %r12, %rdx # tmp745,
323 # sha512-crypt.c:227: if ((cnt & 1) != 0)
325 # sha512-crypt.c:230: sha512_process_bytes (key, key_len, &ctx, nss_ctx);
326 movq %r14, %rsi # tmp350,
327 movq %rbx, %rdi # key,
328 call __sha512_process_bytes@PLT #
329 # sha512-crypt.c:226: for (cnt = key_len; cnt > 0; cnt >>= 1)
333 # sha512-crypt.c:233: sha512_finish_ctx (&ctx, nss_ctx, alt_result);
334 movq %r12, %rdi # tmp745,
335 movq %r15, %rsi # tmp743,
336 call __sha512_finish_ctx@PLT #
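# ctx is finished into alt_result here (sha512-crypt.c:233); this intermediate
# digest is what the rounds loop below keeps remixing with the key- and
# salt-derived P and S sequences.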
337 # sha512-crypt.c:236: sha512_init_ctx (&alt_ctx, nss_alt_ctx);
338 movq -928(%rbp), %rdi # %sfp,
339 call __sha512_init_ctx@PLT #
340 movq %r12, -952(%rbp) # tmp745, %sfp
341 movq %r13, %r12 # cnt, cnt
342 movq -928(%rbp), %r13 # %sfp, tmp746
346 # sha512-crypt.c:240: sha512_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
347 movq %r13, %rdx # tmp746,
348 movq %r14, %rsi # tmp350,
349 movq %rbx, %rdi # key,
350 # sha512-crypt.c:239: for (cnt = 0; cnt < key_len; ++cnt)
352 # sha512-crypt.c:240: sha512_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
353 call __sha512_process_bytes@PLT #
354 # sha512-crypt.c:239: for (cnt = 0; cnt < key_len; ++cnt)
355 cmpq %r12, %r14 # cnt, tmp350
357 movq -952(%rbp), %r12 # %sfp, tmp745
359 # sha512-crypt.c:243: sha512_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
360 leaq -816(%rbp), %rax #, tmp744
361 movq -928(%rbp), %rdi # %sfp,
362 movq %rax, %rsi # tmp744,
363 movq %rax, -960(%rbp) # tmp744, %sfp
364 call __sha512_finish_ctx@PLT #
365 # ../sysdeps/pthread/allocalim.h:27: return (__glibc_likely (__libc_alloca_cutoff (size))
366 movq -912(%rbp), %rbx # %sfp, prephitmp_391
367 movq %rbx, %rdi # prephitmp_391,
368 call __libc_alloca_cutoff@PLT #
369 # ../sysdeps/pthread/allocalim.h:29: || __glibc_likely (size <= PTHREAD_STACK_MIN / 4)
370 cmpq $4096, %rbx #, prephitmp_391
372 testl %eax, %eax # _383
375 # sha512-crypt.c:247: cp = p_bytes = (char *) alloca (key_len);
376 leaq 30(%r14), %rax #, tmp459
377 # sha512-crypt.c:121: char *free_pbytes = NULL;
378 movq $0, -1000(%rbp) #, %sfp
379 # sha512-crypt.c:247: cp = p_bytes = (char *) alloca (key_len);
380 andq $-16, %rax #, tmp463
381 subq %rax, %rsp # tmp463,
382 leaq 15(%rsp), %rax #, tmp465
383 andq $-16, %rax #, tmp465
384 movq %rax, -912(%rbp) # p_bytes, %sfp
385 movq %rax, -888(%rbp) # p_bytes, cp
387 # sha512-crypt.c:258: for (cnt = key_len; cnt >= 64; cnt -= 64)
388 cmpq $63, %r14 #, tmp350
390 leaq -64(%r14), %rcx #, _230
391 movq -912(%rbp), %rsi # %sfp, p_bytes
392 movq %rcx, %rdx # _230, tmp469
393 andq $-64, %rdx #, tmp469
394 leaq 64(%rsi), %rax #, ivtmp.53
395 leaq 128(%rsi,%rdx), %rdx #, _202
399 # sha512-crypt.c:259: cp = mempcpy (cp, temp_result, 64);
400 movdqa -816(%rbp), %xmm0 # MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
401 movups %xmm0, -64(%rax) # MEM[(char * {ref-all})&temp_result], MEM[base: _233, offset: -64B]
402 movdqa -800(%rbp), %xmm0 # MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
403 movups %xmm0, -48(%rax) # MEM[(char * {ref-all})&temp_result], MEM[base: _233, offset: -64B]
404 movdqa -784(%rbp), %xmm0 # MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
405 movups %xmm0, -32(%rax) # MEM[(char * {ref-all})&temp_result], MEM[base: _233, offset: -64B]
406 movdqa -768(%rbp), %xmm0 # MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
407 movups %xmm0, -16(%rax) # MEM[(char * {ref-all})&temp_result], MEM[base: _233, offset: -64B]
408 movq %rax, -888(%rbp) # ivtmp.53, cp
409 addq $64, %rax #, ivtmp.53
410 # sha512-crypt.c:258: for (cnt = key_len; cnt >= 64; cnt -= 64)
411 cmpq %rax, %rdx # ivtmp.53, _202
413 movq -912(%rbp), %rax # %sfp, p_bytes
414 andq $-64, %rcx #, tmp475
415 movq %r14, %rdx # tmp350, cnt
416 andl $63, %edx #, cnt
417 leaq 64(%rax,%rcx), %rcx #, _215
419 # sha512-crypt.c:260: memcpy (cp, temp_result, cnt);
421 movl %edx, %eax # cnt, cnt
425 testl %eax, %eax # cnt
427 movq -960(%rbp), %rsi # %sfp, tmp744
429 movzbl (%rsi), %edx #, tmp490
430 movb %dl, (%rcx) # tmp490,* _215
433 # sha512-crypt.c:263: sha512_init_ctx (&alt_ctx, nss_alt_ctx);
434 movq -928(%rbp), %rdi # %sfp,
435 # sha512-crypt.c:266: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
436 xorl %ebx, %ebx # cnt
437 # sha512-crypt.c:263: sha512_init_ctx (&alt_ctx, nss_alt_ctx);
438 call __sha512_init_ctx@PLT #
439 # sha512-crypt.c:266: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
440 movq %r14, -952(%rbp) # tmp350, %sfp
441 movq %r12, -1008(%rbp) # tmp745, %sfp
442 movq %rbx, %r14 # cnt, cnt
443 movq -944(%rbp), %r13 # %sfp, salt
444 movq -920(%rbp), %r12 # %sfp, _6
445 movq -928(%rbp), %rbx # %sfp, tmp746
449 # sha512-crypt.c:267: sha512_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
450 movq %rbx, %rdx # tmp746,
451 movq %r12, %rsi # _6,
452 movq %r13, %rdi # salt,
453 call __sha512_process_bytes@PLT #
454 # sha512-crypt.c:266: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
455 movzbl -880(%rbp), %edx # alt_result, tmp516
457 addq $16, %rdx #, tmp517
458 cmpq %r14, %rdx # cnt, tmp517
460 # sha512-crypt.c:270: sha512_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
461 movq -960(%rbp), %rbx # %sfp, tmp744
462 movq -928(%rbp), %rdi # %sfp,
463 movq -952(%rbp), %r14 # %sfp, tmp350
464 movq -1008(%rbp), %r12 # %sfp, tmp745
465 movq %rbx, %rsi # tmp744,
466 call __sha512_finish_ctx@PLT #
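# temp_result (at -816(%rbp)) now holds the digest from which the S sequence
# (salt_len bytes) is copied, just as the earlier block built the P sequence
# (key_len bytes) in p_bytes; the mempcpy/memcpy code around sha512-crypt.c:259
# and :276 does the copying in 64-byte chunks plus a tail.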
467 # sha512-crypt.c:273: cp = s_bytes = alloca (salt_len);
468 movq -920(%rbp), %rdi # %sfp, _6
469 leaq 30(%rdi), %rax #, tmp522
470 movq %rdi, %rcx # _6, _6
471 andq $-16, %rax #, tmp526
472 subq %rax, %rsp # tmp526,
473 leaq 15(%rsp), %rax #, tmp528
474 andq $-16, %rax #, tmp528
475 # sha512-crypt.c:276: memcpy (cp, temp_result, cnt);
477 # sha512-crypt.c:273: cp = s_bytes = alloca (salt_len);
478 movq %rax, %rsi # tmp528, tmp530
479 movq %rax, -952(%rbp) # tmp530, %sfp
480 movq %rax, -888(%rbp) # tmp530, cp
481 # sha512-crypt.c:276: memcpy (cp, temp_result, cnt);
482 movq %rbx, %rax # tmp744, tmp534
485 xorl %edx, %edx # tmp540
494 # sha512-crypt.c:280: for (cnt = 0; cnt < rounds; ++cnt)
495 xorl %ebx, %ebx # cnt
500 # sha512-crypt.c:287: sha512_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
501 movq -912(%rbp), %rdi # %sfp,
502 movq %r14, %rsi # tmp350,
503 call __sha512_process_bytes@PLT #
505 # sha512-crypt.c:292: if (cnt % 3 != 0)
506 movabsq $-6148914691236517205, %rax #, tmp804
509 leaq (%rdx,%rdx,2), %rax #, tmp558
510 cmpq %rax, %rbx # tmp558, cnt
513 # sha512-crypt.c:296: if (cnt % 7 != 0)
514 movabsq $5270498306774157605, %rax #, tmp805
516 movq %rbx, %rax # cnt, tmp583
517 sarq $63, %rax #, tmp583
519 subq %rax, %rdx # tmp583, tmp579
520 leaq 0(,%rdx,8), %rax #, tmp585
521 subq %rdx, %rax # tmp579, tmp586
522 cmpq %rax, %rbx # tmp586, cnt
525 # sha512-crypt.c:300: if ((cnt & 1) != 0)
526 testq %r13, %r13 # _26
527 # sha512-crypt.c:301: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
528 movq %r12, %rdx # tmp745,
529 # sha512-crypt.c:300: if ((cnt & 1) != 0)
531 # sha512-crypt.c:301: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
533 movq %r15, %rdi # tmp743,
534 call __sha512_process_bytes@PLT #
536 # sha512-crypt.c:306: sha512_finish_ctx (&ctx, nss_ctx, alt_result);
537 movq %r15, %rsi # tmp743,
538 movq %r12, %rdi # tmp745,
539 # sha512-crypt.c:280: for (cnt = 0; cnt < rounds; ++cnt)
541 # sha512-crypt.c:306: sha512_finish_ctx (&ctx, nss_ctx, alt_result);
542 call __sha512_finish_ctx@PLT #
543 # sha512-crypt.c:280: for (cnt = 0; cnt < rounds; ++cnt)
544 cmpq %rbx, -936(%rbp) # cnt, %sfp
547 # sha512-crypt.c:283: sha512_init_ctx (&ctx, nss_ctx);
548 movq %r12, %rdi # tmp745,
549 # sha512-crypt.c:286: if ((cnt & 1) != 0)
550 movq %rbx, %r13 # cnt, _26
551 # sha512-crypt.c:283: sha512_init_ctx (&ctx, nss_ctx);
552 call __sha512_init_ctx@PLT #
553 # sha512-crypt.c:286: if ((cnt & 1) != 0)
554 andl $1, %r13d #, _26
555 # sha512-crypt.c:287: sha512_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
556 movq %r12, %rdx # tmp745,
557 # sha512-crypt.c:286: if ((cnt & 1) != 0)
559 # sha512-crypt.c:289: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
561 movq %r15, %rdi # tmp743,
562 call __sha512_process_bytes@PLT #
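# Rounds loop (sha512-crypt.c:280-306): every iteration reinitializes ctx, adds
# p_bytes or alt_result depending on cnt & 1, adds s_bytes when cnt % 3 != 0 and
# p_bytes again when cnt % 7 != 0, then finishes back into alt_result.  The
# movabsq constants above are the reciprocal multipliers GCC emits for the
# % 3 and % 7 tests.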
567 # sha512-crypt.c:260: memcpy (cp, temp_result, cnt);
568 movq -816(%rbp), %rax #, tmp499
569 movq %rax, (%rcx) # tmp499,* _215
570 movq -960(%rbp), %rdi # %sfp, tmp744
571 movl %edx, %eax # cnt, cnt
572 movq -8(%rdi,%rax), %rsi #, tmp506
573 movq %rsi, -8(%rcx,%rax) # tmp506,
574 leaq 8(%rcx), %rsi #, tmp507
575 andq $-8, %rsi #, tmp507
576 subq %rsi, %rcx # tmp507, _215
577 leal (%rdx,%rcx), %eax #, cnt
578 subq %rcx, %rdi # _215, tmp480
579 andl $-8, %eax #, cnt
582 andl $-8, %eax #, tmp509
583 xorl %edx, %edx # tmp508
585 movl %edx, %ecx # tmp508, tmp510
586 addl $8, %edx #, tmp508
587 movq (%rdi,%rcx), %r8 #, tmp511
588 cmpl %eax, %edx # tmp509, tmp508
589 movq %r8, (%rsi,%rcx) # tmp511,
595 # sha512-crypt.c:303: sha512_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
596 movq -912(%rbp), %rdi # %sfp,
597 movq %r14, %rsi # tmp350,
598 call __sha512_process_bytes@PLT #
603 # sha512-crypt.c:297: sha512_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
604 movq -912(%rbp), %rdi # %sfp,
605 movq %r12, %rdx # tmp745,
606 movq %r14, %rsi # tmp350,
607 call __sha512_process_bytes@PLT #
612 # sha512-crypt.c:293: sha512_process_bytes (s_bytes, salt_len, &ctx, nss_ctx);
613 movq -920(%rbp), %rsi # %sfp,
614 movq -952(%rbp), %rdi # %sfp,
615 movq %r12, %rdx # tmp745,
616 call __sha512_process_bytes@PLT #
621 # sha512-crypt.c:316: cp = __stpncpy (buffer, sha512_salt_prefix, MAX (0, buflen));
622 movl -900(%rbp), %edx # buflen,
623 xorl %ebx, %ebx # tmp595
624 movq -968(%rbp), %rdi # %sfp,
625 leaq sha512_salt_prefix(%rip), %rsi #,
627 movl %ebx, %edx # tmp595, tmp594
628 cmovns -900(%rbp), %edx # buflen,, tmp594
629 movslq %edx, %rdx # tmp594, tmp596
631 movq %rax, %rdi #, _32
632 movq %rax, -888(%rbp) # _32, cp
633 # sha512-crypt.c:317: buflen -= sizeof (sha512_salt_prefix) - 1;
634 movl -900(%rbp), %eax # buflen, tmp868
635 # sha512-crypt.c:319: if (rounds_custom)
636 cmpb $0, -901(%rbp) #, %sfp
637 # sha512-crypt.c:317: buflen -= sizeof (sha512_salt_prefix) - 1;
638 leal -3(%rax), %edx #, _36
639 movl %edx, -900(%rbp) # _36, buflen
640 # sha512-crypt.c:319: if (rounds_custom)
643 # sha512-crypt.c:327: cp = __stpncpy (cp, salt, MIN ((size_t) MAX (0, buflen), salt_len));
644 movq -920(%rbp), %r13 # %sfp, _6
646 testl %edx, %edx # _36
647 cmovs %ebx, %edx # _36,, tmp606, tmp605
648 movq -944(%rbp), %rsi # %sfp,
649 movslq %edx, %rdx # tmp605, tmp607
650 cmpq %r13, %rdx # _6, tmp607
651 cmova %r13, %rdx # tmp607,, _6, tmp604
653 # sha512-crypt.c:328: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
654 movslq -900(%rbp), %rdx # buflen,
655 # sha512-crypt.c:327: cp = __stpncpy (cp, salt, MIN ((size_t) MAX (0, buflen), salt_len));
656 movq %rax, -888(%rbp) # _49, cp
657 # sha512-crypt.c:328: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
658 testl %edx, %edx # buflen.23_50
659 cmovns %rdx, %rbx #,,
660 cmpq %r13, %rbx # _6, tmp611
661 cmova %r13, %rbx # tmp611,, _6, tmp608
662 subl %ebx, %edx # tmp608, _57
663 # sha512-crypt.c:330: if (buflen > 0)
664 testl %edx, %edx # _57
665 # sha512-crypt.c:328: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
666 movl %edx, -900(%rbp) # _57, buflen
667 # sha512-crypt.c:330: if (buflen > 0)
669 # sha512-crypt.c:332: *cp++ = '$';
670 leaq 1(%rax), %rdx #, tmp612
671 movq %rdx, -888(%rbp) # tmp612, cp
672 movb $36, (%rax) #, *_49
673 # sha512-crypt.c:333: --buflen;
674 subl $1, -900(%rbp) #, buflen
676 # sha512-crypt.c:336: __b64_from_24bit (&cp, &buflen,
677 movzbl -859(%rbp), %ecx # alt_result, alt_result
678 movzbl -880(%rbp), %edx # alt_result, alt_result
679 leaq -900(%rbp), %r13 #, tmp616
680 movzbl -838(%rbp), %r8d # alt_result,
681 leaq -888(%rbp), %rbx #, tmp617
683 movq %r13, %rsi # tmp616,
684 movq %rbx, %rdi # tmp617,
685 call __b64_from_24bit@PLT #
686 # sha512-crypt.c:338: __b64_from_24bit (&cp, &buflen,
687 movzbl -837(%rbp), %ecx # alt_result, alt_result
688 movzbl -858(%rbp), %edx # alt_result, alt_result
690 movzbl -879(%rbp), %r8d # alt_result,
691 movq %r13, %rsi # tmp616,
692 movq %rbx, %rdi # tmp617,
693 call __b64_from_24bit@PLT #
694 # sha512-crypt.c:340: __b64_from_24bit (&cp, &buflen,
695 movzbl -878(%rbp), %ecx # alt_result, alt_result
696 movzbl -836(%rbp), %edx # alt_result, alt_result
698 movzbl -857(%rbp), %r8d # alt_result,
699 movq %r13, %rsi # tmp616,
700 movq %rbx, %rdi # tmp617,
701 call __b64_from_24bit@PLT #
702 # sha512-crypt.c:342: __b64_from_24bit (&cp, &buflen,
703 movzbl -856(%rbp), %ecx # alt_result, alt_result
704 movzbl -877(%rbp), %edx # alt_result, alt_result
706 movzbl -835(%rbp), %r8d # alt_result,
707 movq %r13, %rsi # tmp616,
708 movq %rbx, %rdi # tmp617,
709 call __b64_from_24bit@PLT #
710 # sha512-crypt.c:344: __b64_from_24bit (&cp, &buflen,
711 movzbl -834(%rbp), %ecx # alt_result, alt_result
712 movzbl -855(%rbp), %edx # alt_result, alt_result
714 movzbl -876(%rbp), %r8d # alt_result,
715 movq %r13, %rsi # tmp616,
716 movq %rbx, %rdi # tmp617,
717 call __b64_from_24bit@PLT #
718 # sha512-crypt.c:346: __b64_from_24bit (&cp, &buflen,
719 movzbl -875(%rbp), %ecx # alt_result, alt_result
720 movzbl -833(%rbp), %edx # alt_result, alt_result
722 movzbl -854(%rbp), %r8d # alt_result,
723 movq %r13, %rsi # tmp616,
724 movq %rbx, %rdi # tmp617,
725 call __b64_from_24bit@PLT #
726 # sha512-crypt.c:348: __b64_from_24bit (&cp, &buflen,
727 movzbl -853(%rbp), %ecx # alt_result, alt_result
728 movzbl -874(%rbp), %edx # alt_result, alt_result
730 movzbl -832(%rbp), %r8d # alt_result,
731 movq %r13, %rsi # tmp616,
732 movq %rbx, %rdi # tmp617,
733 call __b64_from_24bit@PLT #
734 # sha512-crypt.c:350: __b64_from_24bit (&cp, &buflen,
735 movzbl -831(%rbp), %ecx # alt_result, alt_result
736 movzbl -852(%rbp), %edx # alt_result, alt_result
738 movzbl -873(%rbp), %r8d # alt_result,
739 movq %r13, %rsi # tmp616,
740 movq %rbx, %rdi # tmp617,
741 call __b64_from_24bit@PLT #
742 # sha512-crypt.c:352: __b64_from_24bit (&cp, &buflen,
743 movzbl -872(%rbp), %ecx # alt_result, alt_result
744 movzbl -830(%rbp), %edx # alt_result, alt_result
746 movzbl -851(%rbp), %r8d # alt_result,
747 movq %r13, %rsi # tmp616,
748 movq %rbx, %rdi # tmp617,
749 call __b64_from_24bit@PLT #
750 # sha512-crypt.c:354: __b64_from_24bit (&cp, &buflen,
751 movzbl -850(%rbp), %ecx # alt_result, alt_result
752 movzbl -871(%rbp), %edx # alt_result, alt_result
754 movzbl -829(%rbp), %r8d # alt_result,
755 movq %r13, %rsi # tmp616,
756 movq %rbx, %rdi # tmp617,
757 call __b64_from_24bit@PLT #
758 # sha512-crypt.c:356: __b64_from_24bit (&cp, &buflen,
759 movzbl -828(%rbp), %ecx # alt_result, alt_result
760 movzbl -849(%rbp), %edx # alt_result, alt_result
762 movzbl -870(%rbp), %r8d # alt_result,
763 movq %r13, %rsi # tmp616,
764 movq %rbx, %rdi # tmp617,
765 call __b64_from_24bit@PLT #
766 # sha512-crypt.c:358: __b64_from_24bit (&cp, &buflen,
767 movzbl -869(%rbp), %ecx # alt_result, alt_result
768 movzbl -827(%rbp), %edx # alt_result, alt_result
770 movzbl -848(%rbp), %r8d # alt_result,
771 movq %r13, %rsi # tmp616,
772 movq %rbx, %rdi # tmp617,
773 call __b64_from_24bit@PLT #
774 # sha512-crypt.c:360: __b64_from_24bit (&cp, &buflen,
775 movzbl -847(%rbp), %ecx # alt_result, alt_result
776 movzbl -868(%rbp), %edx # alt_result, alt_result
778 movzbl -826(%rbp), %r8d # alt_result,
779 movq %r13, %rsi # tmp616,
780 movq %rbx, %rdi # tmp617,
781 call __b64_from_24bit@PLT #
782 # sha512-crypt.c:362: __b64_from_24bit (&cp, &buflen,
783 movzbl -825(%rbp), %ecx # alt_result, alt_result
784 movzbl -846(%rbp), %edx # alt_result, alt_result
786 movzbl -867(%rbp), %r8d # alt_result,
787 movq %r13, %rsi # tmp616,
788 movq %rbx, %rdi # tmp617,
789 call __b64_from_24bit@PLT #
790 # sha512-crypt.c:364: __b64_from_24bit (&cp, &buflen,
791 movzbl -866(%rbp), %ecx # alt_result, alt_result
792 movzbl -824(%rbp), %edx # alt_result, alt_result
794 movzbl -845(%rbp), %r8d # alt_result,
795 movq %r13, %rsi # tmp616,
796 movq %rbx, %rdi # tmp617,
797 call __b64_from_24bit@PLT #
798 # sha512-crypt.c:366: __b64_from_24bit (&cp, &buflen,
799 movzbl -844(%rbp), %ecx # alt_result, alt_result
800 movzbl -865(%rbp), %edx # alt_result, alt_result
802 movzbl -823(%rbp), %r8d # alt_result,
803 movq %r13, %rsi # tmp616,
804 movq %rbx, %rdi # tmp617,
805 call __b64_from_24bit@PLT #
806 # sha512-crypt.c:368: __b64_from_24bit (&cp, &buflen,
807 movzbl -822(%rbp), %ecx # alt_result, alt_result
808 movzbl -843(%rbp), %edx # alt_result, alt_result
810 movzbl -864(%rbp), %r8d # alt_result,
811 movq %r13, %rsi # tmp616,
812 movq %rbx, %rdi # tmp617,
813 call __b64_from_24bit@PLT #
814 # sha512-crypt.c:370: __b64_from_24bit (&cp, &buflen,
815 movzbl -863(%rbp), %ecx # alt_result, alt_result
816 movzbl -821(%rbp), %edx # alt_result, alt_result
818 movzbl -842(%rbp), %r8d # alt_result,
819 movq %r13, %rsi # tmp616,
820 movq %rbx, %rdi # tmp617,
821 call __b64_from_24bit@PLT #
822 # sha512-crypt.c:372: __b64_from_24bit (&cp, &buflen,
823 movzbl -841(%rbp), %ecx # alt_result, alt_result
824 movzbl -862(%rbp), %edx # alt_result, alt_result
826 movzbl -820(%rbp), %r8d # alt_result,
827 movq %r13, %rsi # tmp616,
828 movq %rbx, %rdi # tmp617,
829 call __b64_from_24bit@PLT #
830 # sha512-crypt.c:374: __b64_from_24bit (&cp, &buflen,
831 movzbl -819(%rbp), %ecx # alt_result, alt_result
832 movzbl -840(%rbp), %edx # alt_result, alt_result
834 movzbl -861(%rbp), %r8d # alt_result,
835 movq %r13, %rsi # tmp616,
836 movq %rbx, %rdi # tmp617,
837 call __b64_from_24bit@PLT #
838 # sha512-crypt.c:376: __b64_from_24bit (&cp, &buflen,
839 movzbl -860(%rbp), %ecx # alt_result, alt_result
840 movzbl -818(%rbp), %edx # alt_result, alt_result
842 movzbl -839(%rbp), %r8d # alt_result,
843 movq %r13, %rsi # tmp616,
844 movq %rbx, %rdi # tmp617,
845 call __b64_from_24bit@PLT #
846 # sha512-crypt.c:378: __b64_from_24bit (&cp, &buflen,
847 movzbl -817(%rbp), %r8d # alt_result,
851 movq %r13, %rsi # tmp616,
852 movq %rbx, %rdi # tmp617,
853 call __b64_from_24bit@PLT #
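# The 22 __b64_from_24bit calls above produce the 86-character checksum: each
# call encodes three bytes of alt_result, taken in the scheme's permuted order,
# as four characters of the crypt base64 alphabet; the last call encodes the
# single remaining byte.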
854 # sha512-crypt.c:381: if (buflen <= 0)
855 movl -900(%rbp), %eax # buflen,
858 # sha512-crypt.c:387: *cp = '\0'; /* Terminate the string. */
859 movq -888(%rbp), %rax # cp, cp.31_190
860 movq -968(%rbp), %rbx # %sfp, <retval>
861 movb $0, (%rax) #, *cp.31_190
863 # sha512-crypt.c:394: __sha512_init_ctx (&ctx);
864 movq %r12, %rdi # tmp745,
865 call __sha512_init_ctx@PLT #
866 # sha512-crypt.c:395: __sha512_finish_ctx (&ctx, alt_result);
867 movq %r15, %rsi # tmp743,
868 movq %r12, %rdi # tmp745,
869 call __sha512_finish_ctx@PLT #
870 # sha512-crypt.c:396: explicit_bzero (&ctx, sizeof (ctx));
873 movq %r12, %rdi # tmp745,
874 call __explicit_bzero_chk@PLT #
875 # sha512-crypt.c:397: explicit_bzero (&alt_ctx, sizeof (alt_ctx));
876 movq -928(%rbp), %rdi # %sfp,
879 call __explicit_bzero_chk@PLT #
880 # sha512-crypt.c:399: explicit_bzero (temp_result, sizeof (temp_result));
881 movq -960(%rbp), %rdi # %sfp,
884 call __explicit_bzero_chk@PLT #
885 # sha512-crypt.c:400: explicit_bzero (p_bytes, key_len);
886 movq -912(%rbp), %rdi # %sfp,
888 movq %r14, %rsi # tmp350,
889 call __explicit_bzero_chk@PLT #
890 # sha512-crypt.c:401: explicit_bzero (s_bytes, salt_len);
891 movq -920(%rbp), %rsi # %sfp,
892 movq -952(%rbp), %rdi # %sfp,
894 call __explicit_bzero_chk@PLT #
895 # sha512-crypt.c:402: if (copied_key != NULL)
896 movq -992(%rbp), %rax # %sfp, copied_key
897 testq %rax, %rax # copied_key
899 # sha512-crypt.c:403: explicit_bzero (copied_key, key_len);
901 movq %r14, %rsi # tmp350,
902 movq %rax, %rdi # copied_key,
903 call __explicit_bzero_chk@PLT #
905 # sha512-crypt.c:404: if (copied_salt != NULL)
906 movq -976(%rbp), %rax # %sfp, copied_salt
907 testq %rax, %rax # copied_salt
909 # sha512-crypt.c:405: explicit_bzero (copied_salt, salt_len);
910 movq -920(%rbp), %rsi # %sfp,
912 movq %rax, %rdi # copied_salt,
913 call __explicit_bzero_chk@PLT #
915 # sha512-crypt.c:407: free (free_key);
916 movq -984(%rbp), %rdi # %sfp,
918 # sha512-crypt.c:408: free (free_pbytes);
919 movq -1000(%rbp), %rdi # %sfp,
922 # sha512-crypt.c:410: }
923 leaq -40(%rbp), %rsp #,
924 movq %rbx, %rax # <retval>,
938 # sha512-crypt.c:276: memcpy (cp, temp_result, cnt);
939 movl (%rax), %edx #, tmp542
941 movl %edx, (%rsi) # tmp542,* s_bytes
942 movl $4, %edx #, tmp540
948 movzbl (%rax,%rdx), %eax #, tmp548
949 movb %al, (%rsi,%rdx) # tmp548,
954 movzwl (%rax,%rdx), %edi #, tmp545
955 movw %di, (%rsi,%rdx) # tmp545,
956 addq $2, %rdx #, tmp540
963 movl %edi, %esi # _6, tmp536
964 xorl %eax, %eax # tmp535
965 movq %rbx, %r8 # tmp744, tmp744
966 andl $-8, %esi #, tmp536
968 movl %eax, %edx # tmp535, tmp537
969 movq -952(%rbp), %rbx # %sfp, tmp530
971 movq (%r8,%rdx), %rdi # MEM[(void *)&temp_result], tmp538
972 cmpl %esi, %eax # tmp536, tmp535
973 movq %rdi, (%rbx,%rdx) # tmp538, MEM[(void *)s_bytes_294]
975 movq %rbx, %rsi # tmp530, tmp530
976 addq %rax, %rsi # tmp539, s_bytes
977 addq -960(%rbp), %rax # %sfp, tmp534
982 # sha512-crypt.c:383: __set_errno (ERANGE);
983 movq errno@gottpoff(%rip), %rax #, tmp721
984 # sha512-crypt.c:384: buffer = NULL;
985 xorl %ebx, %ebx # <retval>
986 # sha512-crypt.c:383: __set_errno (ERANGE);
987 movl $34, %fs:(%rax) #, errno
992 # sha512-crypt.c:321: int n = __snprintf (cp, MAX (0, buflen), "%s%zu$",
993 testl %edx, %edx # _36
994 movq -936(%rbp), %r8 # %sfp,
995 leaq sha512_rounds_prefix(%rip), %rcx #,
996 cmovs %ebx, %edx # _36,, tmp595, tmp598
998 movslq %edx, %rsi # tmp598, tmp600
999 leaq .LC1(%rip), %rdx #,
1000 call __snprintf@PLT #
1001 # sha512-crypt.c:324: buflen -= n;
1002 movl -900(%rbp), %edx # buflen, _36
1003 # sha512-crypt.c:323: cp += n;
1004 movslq %eax, %rdi # n, n
1005 addq -888(%rbp), %rdi # cp, _32
1006 # sha512-crypt.c:324: buflen -= n;
1007 subl %eax, %edx # n, _36
1008 # sha512-crypt.c:323: cp += n;
1009 movq %rdi, -888(%rbp) # _32, cp
1010 # sha512-crypt.c:324: buflen -= n;
1011 movl %edx, -900(%rbp) # _36, buflen
1016 # sha512-crypt.c:132: const char *num = salt + sizeof (sha512_rounds_prefix) - 1;
1017 movq -944(%rbp), %rax # %sfp, salt
1018 # sha512-crypt.c:134: unsigned long int srounds = strtoul (num, &endp, 10);
1019 leaq -400(%rbp), %rsi #, tmp345
1021 # sha512-crypt.c:132: const char *num = salt + sizeof (sha512_rounds_prefix) - 1;
1022 leaq 7(%rax), %rdi #, num
1023 # sha512-crypt.c:134: unsigned long int srounds = strtoul (num, &endp, 10);
1025 # sha512-crypt.c:135: if (*endp == '$')
1026 movq -400(%rbp), %rdx # endp, endp.0_3
1027 cmpb $36, (%rdx) #, *endp.0_3
1029 # sha512-crypt.c:138: rounds = MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX));
1030 cmpq $999999999, %rax #, srounds
1031 # sha512-crypt.c:137: salt = endp + 1;
1032 leaq 1(%rdx), %rcx #, salt
1033 # sha512-crypt.c:138: rounds = MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX));
1034 movl $999999999, %edx #, tmp347
1035 cmovbe %rax, %rdx # srounds,, tmp347
1036 movl $1000, %eax #, tmp348
1037 # sha512-crypt.c:139: rounds_custom = true;
1038 movb $1, -901(%rbp) #, %sfp
1039 cmpq $1000, %rdx #, rounds
1040 # sha512-crypt.c:137: salt = endp + 1;
1041 movq %rcx, -944(%rbp) # salt, %sfp
1042 cmovnb %rdx, %rax # rounds,, tmp348
1043 movq %rax, -936(%rbp) # tmp348, %sfp
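# "rounds=<N>$" parsing (sha512-crypt.c:132-139): strtoul reads N, the salt
# pointer is advanced past the '$' (0x24, compared as 36 above), and the two
# cmov instructions clamp N into [ROUNDS_MIN, ROUNDS_MAX] = [1000, 999999999]
# before it is stored as the loop bound; rounds_custom is set so the prefix is
# echoed into the output string.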
1048 # sha512-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint64_t));
1049 movq -920(%rbp), %rsi # %sfp, _6
1050 leaq 38(%rsi), %rax #, tmp381
1051 # sha512-crypt.c:169: salt = copied_salt =
1052 movl %esi, %edx # _6,
1053 # sha512-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint64_t));
1054 andq $-16, %rax #, tmp385
1055 subq %rax, %rsp # tmp385,
1056 leaq 15(%rsp), %rax #, tmp387
1057 andq $-16, %rax #, tmp389
1058 # sha512-crypt.c:169: salt = copied_salt =
1060 # sha512-crypt.c:171: - (tmp - (char *) 0) % __alignof__ (uint64_t),
1061 leaq 8(%rax), %rcx #, tmp390
1062 # sha512-crypt.c:169: salt = copied_salt =
1066 testl %edx, %edx # _6
1068 movq -944(%rbp), %rsi # %sfp, salt
1070 movzbl (%rsi), %esi #* salt, tmp403
1071 movb %sil, 8(%rax) # tmp403,
1074 movq %rcx, -944(%rbp) # salt, %sfp
1075 movq %rcx, -976(%rbp) # salt, %sfp
1080 movq -920(%rbp), %rsi # %sfp, _6
1081 movq -944(%rbp), %r8 # %sfp, salt
1082 movl %esi, %eax # _6, _6
1083 movq -8(%r8,%rax), %rdx #, tmp418
1084 movq %rdx, -8(%rcx,%rax) # tmp418,
1085 leal -1(%rsi), %edx #, _6
1088 andl $-8, %edx #, tmp420
1089 xorl %eax, %eax # tmp419
1091 movl %eax, %esi # tmp419, tmp421
1092 addl $8, %eax #, tmp419
1093 movq (%r8,%rsi), %rdi #, tmp422
1094 cmpl %edx, %eax # tmp420, tmp419
1095 movq %rdi, (%rcx,%rsi) # tmp422,
1099 # sha512-crypt.c:260: memcpy (cp, temp_result, cnt);
1100 movq -960(%rbp), %rsi # %sfp, tmp744
1101 movl (%rsi), %edx #, tmp482
1102 movl %edx, (%rcx) # tmp482,* _215
1103 movl %eax, %edx # cnt, cnt
1104 movl -4(%rsi,%rdx), %eax #, tmp489
1105 movl %eax, -4(%rcx,%rdx) # tmp489,
1110 # sha512-crypt.c:222: sha512_process_bytes (alt_result, cnt, &ctx, nss_ctx);
1111 movq %r12, %rdx # tmp745,
1112 movq %r14, %rsi # tmp350,
1113 movq %r15, %rdi # tmp743,
1114 call __sha512_process_bytes@PLT #
1115 # sha512-crypt.c:226: for (cnt = key_len; cnt > 0; cnt >>= 1)
1116 testq %r14, %r14 # tmp350
1118 # sha512-crypt.c:233: sha512_finish_ctx (&ctx, nss_ctx, alt_result);
1119 movq %r12, %rdi # tmp745,
1120 movq %r15, %rsi # tmp743,
1121 call __sha512_finish_ctx@PLT #
1122 # sha512-crypt.c:236: sha512_init_ctx (&alt_ctx, nss_alt_ctx);
1123 movq -928(%rbp), %rdi # %sfp,
1124 call __sha512_init_ctx@PLT #
1129 # sha512-crypt.c:258: for (cnt = key_len; cnt >= 64; cnt -= 64)
1130 movq -912(%rbp), %rcx # %sfp, _215
1131 movq %r14, %rdx # tmp350, cnt
1134 # sha512-crypt.c:260: memcpy (cp, temp_result, cnt);
1135 movl %eax, %edx # cnt, cnt
1136 movq -960(%rbp), %rax # %sfp, tmp744
1137 movzwl -2(%rax,%rdx), %eax #, tmp498
1138 movw %ax, -2(%rcx,%rdx) # tmp498,
1141 # sha512-crypt.c:250: free_pbytes = cp = p_bytes = (char *)malloc (key_len);
1142 movq %r14, %rdi # tmp350,
1144 # sha512-crypt.c:251: if (free_pbytes == NULL)
1145 testq %rax, %rax # p_bytes
1146 # sha512-crypt.c:250: free_pbytes = cp = p_bytes = (char *)malloc (key_len);
1147 movq %rax, -912(%rbp) # p_bytes, %sfp
1148 movq %rax, -888(%rbp) # p_bytes, cp
1149 # sha512-crypt.c:251: if (free_pbytes == NULL)
1151 movq %rax, -1000(%rbp) # p_bytes, %sfp
1154 # sha512-crypt.c:169: salt = copied_salt =
1155 movq -944(%rbp), %rdi # %sfp, salt
1156 movl (%rdi), %esi #* salt, tmp395
1157 movl %esi, 8(%rax) # tmp395,
1158 movl -4(%rdi,%rdx), %eax #, tmp402
1159 movl %eax, -4(%rcx,%rdx) # tmp402,
1162 movq -944(%rbp), %rax # %sfp, salt
1163 movzwl -2(%rax,%rdx), %eax #, tmp411
1164 movw %ax, -2(%rcx,%rdx) # tmp411,
1167 # sha512-crypt.c:154: free_key = tmp = (char *) malloc (key_len + __alignof__ (uint64_t));
1168 movq %r12, %rdi # _9,
1170 # sha512-crypt.c:155: if (tmp == NULL)
1171 testq %rax, %rax # free_key
1172 # sha512-crypt.c:154: free_key = tmp = (char *) malloc (key_len + __alignof__ (uint64_t));
1173 movq %rax, %rdi #, free_key
1174 movq %rax, -984(%rbp) # free_key, %sfp
1175 # sha512-crypt.c:155: if (tmp == NULL)
1177 movq %r14, -912(%rbp) # tmp350, %sfp
1180 # sha512-crypt.c:253: free (free_key);
1181 movq -984(%rbp), %rdi # %sfp,
1182 # sha512-crypt.c:254: return NULL;
1183 xorl %ebx, %ebx # <retval>
1184 # sha512-crypt.c:253: free (free_key);
1186 # sha512-crypt.c:254: return NULL;
1189 # sha512-crypt.c:156: return NULL;
1190 xorl %ebx, %ebx # <retval>
1194 .size __sha512_crypt_r, .-__sha512_crypt_r
1196 .globl __sha512_crypt
1197 .type __sha512_crypt, @function
1202 .cfi_def_cfa_offset 16
1205 .cfi_def_cfa_offset 24
1207 movq %rdi, %r12 # key, key
1209 .cfi_def_cfa_offset 32
1211 # sha512-crypt.c:429: + strlen (salt) + 1 + 86 + 1);
1212 movq %rsi, %rdi # salt,
1213 # sha512-crypt.c:421: {
1214 movq %rsi, %rbp # salt, salt
1215 # sha512-crypt.c:429: + strlen (salt) + 1 + 86 + 1);
1217 # sha512-crypt.c:431: if (buflen < needed)
1218 movl buflen.5421(%rip), %ecx # buflen, buflen.33_4
1219 # sha512-crypt.c:429: + strlen (salt) + 1 + 86 + 1);
1220 leal 109(%rax), %ebx #, needed
1221 movq buffer(%rip), %rdx # buffer, <retval>
1222 # sha512-crypt.c:431: if (buflen < needed)
1223 cmpl %ebx, %ecx # needed, buflen.33_4
1225 # sha512-crypt.c:433: char *new_buffer = (char *) realloc (buffer, needed);
1226 movq %rdx, %rdi # <retval>,
1227 movslq %ebx, %rsi # needed, needed
1229 # sha512-crypt.c:434: if (new_buffer == NULL)
1230 testq %rax, %rax # <retval>
1231 # sha512-crypt.c:433: char *new_buffer = (char *) realloc (buffer, needed);
1232 movq %rax, %rdx #, <retval>
1234 # sha512-crypt.c:434: if (new_buffer == NULL)
1235 # sha512-crypt.c:437: buffer = new_buffer;
1236 movq %rax, buffer(%rip) # <retval>, buffer
1237 # sha512-crypt.c:438: buflen = needed;
1238 movl %ebx, buflen.5421(%rip) # needed, buflen
1239 movl %ebx, %ecx # needed, buflen.33_4
1241 # sha512-crypt.c:442: }
1244 .cfi_def_cfa_offset 24
1245 # sha512-crypt.c:441: return __sha512_crypt_r (key, salt, buffer, buflen);
1246 movq %rbp, %rsi # salt,
1247 movq %r12, %rdi # key,
1248 # sha512-crypt.c:442: }
1250 .cfi_def_cfa_offset 16
1252 .cfi_def_cfa_offset 8
1253 # sha512-crypt.c:441: return __sha512_crypt_r (key, salt, buffer, buflen);
1254 jmp __sha512_crypt_r #
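# __sha512_crypt is the non-reentrant wrapper: it keeps a static buffer (its
# pointer lives in the __libc_freeres_ptrs section), grows it with realloc when
# the needed length (salt length + 109 bytes here) exceeds the cached buflen,
# and then tail-calls __sha512_crypt_r.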
1259 # sha512-crypt.c:442: }
1261 .cfi_def_cfa_offset 24
1264 .cfi_def_cfa_offset 16
1266 .cfi_def_cfa_offset 8
1270 .size __sha512_crypt, .-__sha512_crypt
1272 .comm buflen.5421,4,4
1273 .section __libc_freeres_ptrs
1276 .type buffer, @object
1280 .section .rodata.str1.8,"aMS",@progbits,1
1282 .type sha512_rounds_prefix, @object
1283 .size sha512_rounds_prefix, 8
1284 sha512_rounds_prefix:
1286 .section .rodata.str1.1
1287 .type sha512_salt_prefix, @object
1288 .size sha512_salt_prefix, 4
1291 .ident "GCC: (GNU) 7.3.0"
1292 .section .note.GNU-stack,"",@progbits