[nyanglibc.git] / crypt / sha512-crypt.shared.v.s
1 .file "sha512-crypt.c"
2 # GNU C11 (GCC) version 7.3.0 (x86_64-nyan-linux-gnu)
3 # compiled by GNU C version 7.3.0, GMP version 6.1.2, MPFR version 4.0.1, MPC version 1.1.0, isl version none
4 # GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072
5 # options passed: -I ../include
6 # -I /root/wip/nyanglibc/builds/0/nyanglibc/build/crypt
7 # -I /root/wip/nyanglibc/builds/0/nyanglibc/build
8 # -I ../sysdeps/unix/sysv/linux/x86_64/64
9 # -I ../sysdeps/unix/sysv/linux/x86_64
10 # -I ../sysdeps/unix/sysv/linux/x86/include
11 # -I ../sysdeps/unix/sysv/linux/x86 -I ../sysdeps/x86/nptl
12 # -I ../sysdeps/unix/sysv/linux/wordsize-64 -I ../sysdeps/x86_64/nptl
13 # -I ../sysdeps/unix/sysv/linux/include -I ../sysdeps/unix/sysv/linux
14 # -I ../sysdeps/nptl -I ../sysdeps/pthread -I ../sysdeps/gnu
15 # -I ../sysdeps/unix/inet -I ../sysdeps/unix/sysv -I ../sysdeps/unix/x86_64
16 # -I ../sysdeps/unix -I ../sysdeps/posix -I ../sysdeps/x86_64/64
17 # -I ../sysdeps/x86_64/fpu -I ../sysdeps/x86/fpu -I ../sysdeps/x86_64
18 # -I ../sysdeps/x86/include -I ../sysdeps/x86
19 # -I ../sysdeps/ieee754/float128 -I ../sysdeps/ieee754/ldbl-96/include
20 # -I ../sysdeps/ieee754/ldbl-96 -I ../sysdeps/ieee754/dbl-64
21 # -I ../sysdeps/ieee754/flt-32 -I ../sysdeps/wordsize-64
22 # -I ../sysdeps/ieee754 -I ../sysdeps/generic -I .. -I ../libio -I .
23 # -MD /run/asm/crypt/sha512-crypt.shared.v.d
24 # -MF /run/asm/crypt/sha512-crypt.os.dt -MP -MT /run/asm/crypt/.os
25 # -D _LIBC_REENTRANT -D MODULE_NAME=libcrypt -D PIC -D SHARED
26 # -D TOP_NAMESPACE=glibc
27 # -include /root/wip/nyanglibc/builds/0/nyanglibc/build/libc-modules.h
28 # -include ../include/libc-symbols.h sha512-crypt.c -mtune=generic
29 # -march=x86-64 -auxbase-strip /run/asm/crypt/sha512-crypt.shared.v.s -O2
30 # -Wall -Wwrite-strings -Wundef -Werror -Wstrict-prototypes
31 # -Wold-style-definition -std=gnu11 -fverbose-asm -fgnu89-inline
32 # -fmerge-all-constants -frounding-math -fno-stack-protector -fmath-errno
33 # -fPIC -ftls-model=initial-exec
34 # options enabled: -fPIC -faggressive-loop-optimizations -falign-labels
35 # -fasynchronous-unwind-tables -fauto-inc-dec -fbranch-count-reg
36 # -fcaller-saves -fchkp-check-incomplete-type -fchkp-check-read
37 # -fchkp-check-write -fchkp-instrument-calls -fchkp-narrow-bounds
38 # -fchkp-optimize -fchkp-store-bounds -fchkp-use-static-bounds
39 # -fchkp-use-static-const-bounds -fchkp-use-wrappers -fcode-hoisting
40 # -fcombine-stack-adjustments -fcommon -fcompare-elim -fcprop-registers
41 # -fcrossjumping -fcse-follow-jumps -fdefer-pop
42 # -fdelete-null-pointer-checks -fdevirtualize -fdevirtualize-speculatively
43 # -fdwarf2-cfi-asm -fearly-inlining -feliminate-unused-debug-types
44 # -fexpensive-optimizations -fforward-propagate -ffp-int-builtin-inexact
45 # -ffunction-cse -fgcse -fgcse-lm -fgnu-runtime -fgnu-unique
46 # -fguess-branch-probability -fhoist-adjacent-loads -fident -fif-conversion
47 # -fif-conversion2 -findirect-inlining -finline -finline-atomics
48 # -finline-functions-called-once -finline-small-functions -fipa-bit-cp
49 # -fipa-cp -fipa-icf -fipa-icf-functions -fipa-icf-variables -fipa-profile
50 # -fipa-pure-const -fipa-ra -fipa-reference -fipa-sra -fipa-vrp
51 # -fira-hoist-pressure -fira-share-save-slots -fira-share-spill-slots
52 # -fisolate-erroneous-paths-dereference -fivopts -fkeep-static-consts
53 # -fleading-underscore -flifetime-dse -flra-remat -flto-odr-type-merging
54 # -fmath-errno -fmerge-all-constants -fmerge-debug-strings
55 # -fmove-loop-invariants -fomit-frame-pointer -foptimize-sibling-calls
56 # -foptimize-strlen -fpartial-inlining -fpeephole -fpeephole2 -fplt
57 # -fprefetch-loop-arrays -free -freg-struct-return -freorder-blocks
58 # -freorder-functions -frerun-cse-after-loop -frounding-math
59 # -fsched-critical-path-heuristic -fsched-dep-count-heuristic
60 # -fsched-group-heuristic -fsched-interblock -fsched-last-insn-heuristic
61 # -fsched-rank-heuristic -fsched-spec -fsched-spec-insn-heuristic
62 # -fsched-stalled-insns-dep -fschedule-fusion -fschedule-insns2
63 # -fsemantic-interposition -fshow-column -fshrink-wrap
64 # -fshrink-wrap-separate -fsigned-zeros -fsplit-ivs-in-unroller
65 # -fsplit-wide-types -fssa-backprop -fssa-phiopt -fstdarg-opt
66 # -fstore-merging -fstrict-aliasing -fstrict-overflow
67 # -fstrict-volatile-bitfields -fsync-libcalls -fthread-jumps
68 # -ftoplevel-reorder -ftrapping-math -ftree-bit-ccp -ftree-builtin-call-dce
69 # -ftree-ccp -ftree-ch -ftree-coalesce-vars -ftree-copy-prop -ftree-cselim
70 # -ftree-dce -ftree-dominator-opts -ftree-dse -ftree-forwprop -ftree-fre
71 # -ftree-loop-if-convert -ftree-loop-im -ftree-loop-ivcanon
72 # -ftree-loop-optimize -ftree-parallelize-loops= -ftree-phiprop -ftree-pre
73 # -ftree-pta -ftree-reassoc -ftree-scev-cprop -ftree-sink -ftree-slsr
74 # -ftree-sra -ftree-switch-conversion -ftree-tail-merge -ftree-ter
75 # -ftree-vrp -funit-at-a-time -funwind-tables -fverbose-asm
76 # -fzero-initialized-in-bss -m128bit-long-double -m64 -m80387
77 # -malign-stringops -mavx256-split-unaligned-load
78 # -mavx256-split-unaligned-store -mfancy-math-387 -mfp-ret-in-387 -mfxsr
79 # -mglibc -mieee-fp -mlong-double-80 -mmmx -mno-sse4 -mpush-args -mred-zone
80 # -msse -msse2 -mstv -mtls-direct-seg-refs -mvzeroupper
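# The listing below is GCC -fverbose-asm output for crypt/sha512-crypt.c,
# built as position-independent code for the shared libcrypt module
# (-DSHARED, MODULE_NAME=libcrypt).  Each "# sha512-crypt.c:NNN:" comment
# quotes the C source line that the following instructions implement, and the
# trailing "#" comments name the C-level values held by the operands.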
82 .text
83 #APP
84 .section __libc_freeres_ptrs, "aw", %nobits
85 .previous
86 .section .rodata.str1.1,"aMS",@progbits,1
87 .LC0:
88 .string "$"
89 .LC1:
90 .string "%s%zu$"
91 #NO_APP
92 .text
93 .p2align 4,,15
94 .globl __sha512_crypt_r
95 .type __sha512_crypt_r, @function
96 __sha512_crypt_r:
97 .LFB41:
98 .cfi_startproc
99 pushq %rbp #
100 .cfi_def_cfa_offset 16
101 .cfi_offset 6, -16
102 movq %rsp, %rbp #,
103 .cfi_def_cfa_register 6
104 pushq %r15 #
105 pushq %r14 #
106 pushq %r13 #
107 pushq %r12 #
108 .cfi_offset 15, -24
109 .cfi_offset 14, -32
110 .cfi_offset 13, -40
111 .cfi_offset 12, -48
112 movq %rsi, %r15 # salt, salt
113 pushq %rbx #
114 .cfi_offset 3, -56
115 movq %rdi, %rbx # key, key
116 # sha512-crypt.c:125: if (strncmp (sha512_salt_prefix, salt, sizeof (sha512_salt_prefix) - 1) == 0)
117 leaq sha512_salt_prefix(%rip), %rdi #,
118 # sha512-crypt.c:103: {
119 subq $968, %rsp #,
120 # sha512-crypt.c:103: {
121 movq %rdx, -968(%rbp) # buffer, %sfp
122 # sha512-crypt.c:125: if (strncmp (sha512_salt_prefix, salt, sizeof (sha512_salt_prefix) - 1) == 0)
123 movl $3, %edx #,
124 # sha512-crypt.c:103: {
125 movl %ecx, -900(%rbp) # buflen, buflen
126 # sha512-crypt.c:125: if (strncmp (sha512_salt_prefix, salt, sizeof (sha512_salt_prefix) - 1) == 0)
127 call strncmp@PLT #
128 # sha512-crypt.c:127: salt += sizeof (sha512_salt_prefix) - 1;
129 leaq 3(%r15), %rdx #, tmp747
130 testl %eax, %eax # _1
131 # sha512-crypt.c:129: if (strncmp (salt, sha512_rounds_prefix, sizeof (sha512_rounds_prefix) - 1)
132 leaq sha512_rounds_prefix(%rip), %rsi #,
133 # sha512-crypt.c:127: salt += sizeof (sha512_salt_prefix) - 1;
134 cmovne %r15, %rdx # tmp747,, salt, tmp747
135 movq %rdx, %rdi # tmp747, salt
136 movq %rdx, -944(%rbp) # salt, %sfp
137 # sha512-crypt.c:129: if (strncmp (salt, sha512_rounds_prefix, sizeof (sha512_rounds_prefix) - 1)
138 movl $7, %edx #,
139 call strncmp@PLT #
140 testl %eax, %eax # _2
141 # sha512-crypt.c:118: bool rounds_custom = false;
142 movb $0, -901(%rbp) #, %sfp
143 # sha512-crypt.c:117: size_t rounds = ROUNDS_DEFAULT;
144 movq $5000, -936(%rbp) #, %sfp
145 # sha512-crypt.c:129: if (strncmp (salt, sha512_rounds_prefix, sizeof (sha512_rounds_prefix) - 1)
146 je .L113 #,
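# sha512-crypt.c:125-141: strip the leading "$6$" (sha512_salt_prefix,
# 3 bytes) from the salt if present, then look for a "rounds=" prefix
# (sha512_rounds_prefix, 7 bytes).  rounds starts at ROUNDS_DEFAULT = 5000
# (-936(%rbp)) and rounds_custom at false (-901(%rbp)); the branch to .L113
# parses an explicit round count from the salt string.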
147 .L3:
148 # sha512-crypt.c:143: salt_len = MIN (strcspn (salt, "$"), SALT_LEN_MAX);
149 movq -944(%rbp), %rdi # %sfp,
150 leaq .LC0(%rip), %rsi #,
151 call strcspn@PLT #
152 cmpq $16, %rax #, _6
153 movq %rax, %rcx #, _6
154 movl $16, %eax #, tmp749
155 cmovb %rcx, %rax # _6,, tmp749
156 # sha512-crypt.c:144: key_len = strlen (key);
157 movq %rbx, %rdi # key,
158 # sha512-crypt.c:143: salt_len = MIN (strcspn (salt, "$"), SALT_LEN_MAX);
159 movq %rax, -920(%rbp) # tmp749, %sfp
160 # sha512-crypt.c:144: key_len = strlen (key);
161 call strlen@PLT #
162 # sha512-crypt.c:146: if ((key - (char *) 0) % __alignof__ (uint64_t) != 0)
163 testb $7, %bl #, key
164 # sha512-crypt.c:144: key_len = strlen (key);
165 movq %rax, %r14 #, tmp350
166 # sha512-crypt.c:146: if ((key - (char *) 0) % __alignof__ (uint64_t) != 0)
167 je .L60 #,
168 # sha512-crypt.c:150: if (__libc_use_alloca (alloca_used + key_len + __alignof__ (uint64_t)))
169 leaq 8(%rax), %r12 #, _9
170 # ../sysdeps/pthread/allocalim.h:27: return (__glibc_likely (__libc_alloca_cutoff (size))
171 movq %r12, %rdi # _9,
172 call __libc_alloca_cutoff@PLT #
173 # ../sysdeps/pthread/allocalim.h:29: || __glibc_likely (size <= PTHREAD_STACK_MIN / 4)
174 testl %eax, %eax # _377
175 jne .L7 #,
176 cmpq $4096, %r12 #, _9
177 ja .L114 #,
178 .L7:
179 # sha512-crypt.c:151: tmp = alloca_account (key_len + __alignof__ (uint64_t), alloca_used);
180 #APP
181 # 151 "sha512-crypt.c" 1
182 mov %rsp, %rax # p__
183 # 0 "" 2
184 #NO_APP
185 addq $30, %r12 #, tmp361
186 andq $-16, %r12 #, tmp365
187 subq %r12, %rsp # tmp365,
188 leaq 15(%rsp), %rdi #, tmp367
189 andq $-16, %rdi #, m__
190 #APP
191 # 151 "sha512-crypt.c" 1
192 sub %rsp , %rax # d__
193 # 0 "" 2
194 #NO_APP
195 addq %r14, %rax # tmp350, prephitmp_391
196 # sha512-crypt.c:120: char *free_key = NULL;
197 movq $0, -984(%rbp) #, %sfp
198 movq %rax, -912(%rbp) # prephitmp_391, %sfp
199 .L9:
200 # sha512-crypt.c:159: key = copied_key =
201 movq %rbx, %rsi # key,
202 # sha512-crypt.c:161: - (tmp - (char *) 0) % __alignof__ (uint64_t),
203 addq $8, %rdi #, tmp371
204 # sha512-crypt.c:159: key = copied_key =
205 movq %r14, %rdx # tmp350,
206 call memcpy@PLT #
207 movq %rax, %rbx #, key
208 movq %rax, -992(%rbp) # key, %sfp
209 jmp .L6 #
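# sha512-crypt.c:143-161: salt_len = MIN (strcspn (salt, "$"), 16) and
# key_len = strlen (key).  If the key is not 8-byte aligned (testb $7, %bl),
# it is copied into an aligned buffer: on the stack when __libc_alloca_cutoff
# allows it or the size is at most 4096 bytes (PTHREAD_STACK_MIN / 4),
# otherwise via malloc at .L114, with free_key recording the heap copy so it
# can be freed on exit.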
210 .p2align 4,,10
211 .p2align 3
212 .L60:
213 # sha512-crypt.c:144: key_len = strlen (key);
214 movq %rax, -912(%rbp) # tmp350, %sfp
215 # sha512-crypt.c:120: char *free_key = NULL;
216 movq $0, -984(%rbp) #, %sfp
217 # sha512-crypt.c:112: char *copied_key = NULL;
218 movq $0, -992(%rbp) #, %sfp
219 .L6:
220 # sha512-crypt.c:166: if ((salt - (char *) 0) % __alignof__ (uint64_t) != 0)
221 testb $7, -944(%rbp) #, %sfp
222 # sha512-crypt.c:113: char *copied_salt = NULL;
223 movq $0, -976(%rbp) #, %sfp
224 # sha512-crypt.c:166: if ((salt - (char *) 0) % __alignof__ (uint64_t) != 0)
225 jne .L115 #,
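# sha512-crypt.c:166-171: the salt gets the same alignment treatment; if it
# is not 8-byte aligned, .L115 copies salt_len bytes into an aligned stack
# buffer and copied_salt records the copy so it can be wiped later.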
226 .L10:
227 # sha512-crypt.c:192: sha512_init_ctx (&ctx, nss_ctx);
228 leaq -752(%rbp), %r12 #, tmp745
229 movq %r12, %rdi # tmp745,
230 call __sha512_init_ctx@PLT #
231 # sha512-crypt.c:195: sha512_process_bytes (key, key_len, &ctx, nss_ctx);
232 movq %r12, %rdx # tmp745,
233 movq %r14, %rsi # tmp350,
234 movq %rbx, %rdi # key,
235 call __sha512_process_bytes@PLT #
236 # sha512-crypt.c:199: sha512_process_bytes (salt, salt_len, &ctx, nss_ctx);
237 movq -944(%rbp), %r15 # %sfp, salt
238 movq -920(%rbp), %rsi # %sfp,
239 movq %r12, %rdx # tmp745,
240 movq %r15, %rdi # salt,
241 call __sha512_process_bytes@PLT #
242 # sha512-crypt.c:204: sha512_init_ctx (&alt_ctx, nss_alt_ctx);
243 leaq -400(%rbp), %rax #, tmp746
244 movq %rax, %r13 # tmp746, tmp746
245 movq %rax, %rdi # tmp746,
246 movq %rax, -928(%rbp) # tmp746, %sfp
247 call __sha512_init_ctx@PLT #
248 # sha512-crypt.c:207: sha512_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
249 movq %r13, %rdx # tmp746,
250 movq %r14, %rsi # tmp350,
251 movq %rbx, %rdi # key,
252 call __sha512_process_bytes@PLT #
253 # sha512-crypt.c:210: sha512_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
254 movq -920(%rbp), %rsi # %sfp,
255 movq %r15, %rdi # salt,
256 movq %r13, %rdx # tmp746,
257 # sha512-crypt.c:217: sha512_finish_ctx (&alt_ctx, nss_alt_ctx, alt_result);
258 leaq -880(%rbp), %r15 #, tmp743
259 # sha512-crypt.c:210: sha512_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
260 call __sha512_process_bytes@PLT #
261 # sha512-crypt.c:213: sha512_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
262 movq %r13, %rdx # tmp746,
263 movq %r14, %rsi # tmp350,
264 movq %rbx, %rdi # key,
265 call __sha512_process_bytes@PLT #
266 # sha512-crypt.c:217: sha512_finish_ctx (&alt_ctx, nss_alt_ctx, alt_result);
267 movq %r15, %rsi # tmp743,
268 movq %r13, %rdi # tmp746,
269 call __sha512_finish_ctx@PLT #
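# sha512-crypt.c:192-217: ctx is seeded with key||salt and alt_ctx with
# key||salt||key; finishing alt_ctx leaves in alt_result (-880(%rbp)) the
# 64-byte digest the SHA-crypt specification calls "digest B".  The loops
# below mix alt_result back into ctx 64 bytes at a time while more than 64
# bytes of key_len remain, then the remaining cnt bytes
# (sha512-crypt.c:220-222).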
270 # sha512-crypt.c:220: for (cnt = key_len; cnt > 64; cnt -= 64)
271 cmpq $64, %r14 #, tmp350
272 jbe .L17 #,
273 leaq -65(%r14), %rax #, _201
274 leaq -64(%r14), %rcx #, _191
275 movq %rbx, -1000(%rbp) # key, %sfp
276 movq %r14, %rbx # cnt, cnt
277 movq %rax, -960(%rbp) # _201, %sfp
278 andq $-64, %rax #, tmp433
279 movq %rcx, -952(%rbp) # _191, %sfp
280 subq %rax, %rcx # tmp433, _192
281 movq %rcx, %r13 # _192, _192
282 .p2align 4,,10
283 .p2align 3
284 .L18:
285 # sha512-crypt.c:221: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
286 movq %r12, %rdx # tmp745,
287 movl $64, %esi #,
288 movq %r15, %rdi # tmp743,
289 # sha512-crypt.c:220: for (cnt = key_len; cnt > 64; cnt -= 64)
290 subq $64, %rbx #, cnt
291 # sha512-crypt.c:221: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
292 call __sha512_process_bytes@PLT #
293 # sha512-crypt.c:220: for (cnt = key_len; cnt > 64; cnt -= 64)
294 cmpq %rbx, %r13 # cnt, _192
295 jne .L18 #,
296 # sha512-crypt.c:222: sha512_process_bytes (alt_result, cnt, &ctx, nss_ctx);
297 movq -960(%rbp), %rax # %sfp, _201
298 movq -952(%rbp), %rsi # %sfp, _191
299 movq %r12, %rdx # tmp745,
300 movq %r15, %rdi # tmp743,
301 movq -1000(%rbp), %rbx # %sfp, key
302 andq $-64, %rax #, _201
303 subq %rax, %rsi # tmp439, _191
304 call __sha512_process_bytes@PLT #
305 .L19:
306 # sha512-crypt.c:220: for (cnt = key_len; cnt > 64; cnt -= 64)
307 movq %r14, %r13 # tmp350, cnt
308 jmp .L23 #
309 .p2align 4,,10
310 .p2align 3
311 .L117:
312 # sha512-crypt.c:228: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
313 movl $64, %esi #,
314 movq %r15, %rdi # tmp743,
315 call __sha512_process_bytes@PLT #
316 # sha512-crypt.c:226: for (cnt = key_len; cnt > 0; cnt >>= 1)
317 shrq %r13 # cnt
318 je .L116 #,
319 .L23:
320 # sha512-crypt.c:227: if ((cnt & 1) != 0)
321 testb $1, %r13b #, cnt
322 # sha512-crypt.c:228: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
323 movq %r12, %rdx # tmp745,
324 # sha512-crypt.c:227: if ((cnt & 1) != 0)
325 jne .L117 #,
326 # sha512-crypt.c:230: sha512_process_bytes (key, key_len, &ctx, nss_ctx);
327 movq %r14, %rsi # tmp350,
328 movq %rbx, %rdi # key,
329 call __sha512_process_bytes@PLT #
330 # sha512-crypt.c:226: for (cnt = key_len; cnt > 0; cnt >>= 1)
331 shrq %r13 # cnt
332 jne .L23 #,
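# sha512-crypt.c:226-230: walk the bits of key_len from the least significant
# end (shrq %r13 each iteration); a 1 bit adds the 64-byte alt_result to ctx,
# a 0 bit adds the key itself.  .L116 then finishes ctx into alt_result,
# which now holds what the SHA-crypt specification calls "digest A".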
333 .L116:
334 # sha512-crypt.c:233: sha512_finish_ctx (&ctx, nss_ctx, alt_result);
335 movq %r12, %rdi # tmp745,
336 movq %r15, %rsi # tmp743,
337 call __sha512_finish_ctx@PLT #
338 # sha512-crypt.c:236: sha512_init_ctx (&alt_ctx, nss_alt_ctx);
339 movq -928(%rbp), %rdi # %sfp,
340 call __sha512_init_ctx@PLT #
341 movq %r12, -952(%rbp) # tmp745, %sfp
342 movq %r13, %r12 # cnt, cnt
343 movq -928(%rbp), %r13 # %sfp, tmp746
344 .p2align 4,,10
345 .p2align 3
346 .L25:
347 # sha512-crypt.c:240: sha512_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
348 movq %r13, %rdx # tmp746,
349 movq %r14, %rsi # tmp350,
350 movq %rbx, %rdi # key,
351 # sha512-crypt.c:239: for (cnt = 0; cnt < key_len; ++cnt)
352 addq $1, %r12 #, cnt
353 # sha512-crypt.c:240: sha512_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
354 call __sha512_process_bytes@PLT #
355 # sha512-crypt.c:239: for (cnt = 0; cnt < key_len; ++cnt)
356 cmpq %r12, %r14 # cnt, tmp350
357 jne .L25 #,
358 movq -952(%rbp), %r12 # %sfp, tmp745
359 .L57:
360 # sha512-crypt.c:243: sha512_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
361 leaq -816(%rbp), %rax #, tmp744
362 movq -928(%rbp), %rdi # %sfp,
363 movq %rax, %rsi # tmp744,
364 movq %rax, -960(%rbp) # tmp744, %sfp
365 call __sha512_finish_ctx@PLT #
366 # ../sysdeps/pthread/allocalim.h:27: return (__glibc_likely (__libc_alloca_cutoff (size))
367 movq -912(%rbp), %rbx # %sfp, prephitmp_391
368 movq %rbx, %rdi # prephitmp_391,
369 call __libc_alloca_cutoff@PLT #
370 # ../sysdeps/pthread/allocalim.h:29: || __glibc_likely (size <= PTHREAD_STACK_MIN / 4)
371 cmpq $4096, %rbx #, prephitmp_391
372 jbe .L26 #,
373 testl %eax, %eax # _383
374 je .L118 #,
375 .L26:
376 # sha512-crypt.c:247: cp = p_bytes = (char *) alloca (key_len);
377 leaq 30(%r14), %rax #, tmp459
378 # sha512-crypt.c:121: char *free_pbytes = NULL;
379 movq $0, -1000(%rbp) #, %sfp
380 # sha512-crypt.c:247: cp = p_bytes = (char *) alloca (key_len);
381 andq $-16, %rax #, tmp463
382 subq %rax, %rsp # tmp463,
383 leaq 15(%rsp), %rax #, tmp465
384 andq $-16, %rax #, tmp465
385 movq %rax, -912(%rbp) # p_bytes, %sfp
386 movq %rax, -888(%rbp) # p_bytes, cp
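# sha512-crypt.c:236-256: alt_ctx was reinitialised and the key added to it
# key_len times (loop .L25); its digest, written to temp_result, is the "DP"
# value of the SHA-crypt scheme.  p_bytes (key_len bytes) lives on the stack
# here, or on the heap via .L118 when the alloca limit would be exceeded, and
# the loop at .L28/.L30 fills it with repeated copies of temp_result, 64
# bytes at a time with SSE moves and the tail through the inlined memcpy at
# .L31/.L119.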
387 .L28:
388 # sha512-crypt.c:258: for (cnt = key_len; cnt >= 64; cnt -= 64)
389 cmpq $63, %r14 #, tmp350
390 jbe .L63 #,
391 leaq -64(%r14), %rcx #, _230
392 movq -912(%rbp), %rsi # %sfp, p_bytes
393 movq %rcx, %rdx # _230, tmp469
394 andq $-64, %rdx #, tmp469
395 leaq 64(%rsi), %rax #, ivtmp.53
396 leaq 128(%rsi,%rdx), %rdx #, _202
397 .p2align 4,,10
398 .p2align 3
399 .L30:
400 # sha512-crypt.c:259: cp = mempcpy (cp, temp_result, 64);
401 movdqa -816(%rbp), %xmm0 # MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
402 movups %xmm0, -64(%rax) # MEM[(char * {ref-all})&temp_result], MEM[base: _233, offset: -64B]
403 movdqa -800(%rbp), %xmm0 # MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
404 movups %xmm0, -48(%rax) # MEM[(char * {ref-all})&temp_result], MEM[base: _233, offset: -64B]
405 movdqa -784(%rbp), %xmm0 # MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
406 movups %xmm0, -32(%rax) # MEM[(char * {ref-all})&temp_result], MEM[base: _233, offset: -64B]
407 movdqa -768(%rbp), %xmm0 # MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
408 movups %xmm0, -16(%rax) # MEM[(char * {ref-all})&temp_result], MEM[base: _233, offset: -64B]
409 movq %rax, -888(%rbp) # ivtmp.53, cp
410 addq $64, %rax #, ivtmp.53
411 # sha512-crypt.c:258: for (cnt = key_len; cnt >= 64; cnt -= 64)
412 cmpq %rax, %rdx # ivtmp.53, _202
413 jne .L30 #,
414 movq -912(%rbp), %rax # %sfp, p_bytes
415 andq $-64, %rcx #, tmp475
416 movq %r14, %rdx # tmp350, cnt
417 andl $63, %edx #, cnt
418 leaq 64(%rax,%rcx), %rcx #, _215
419 .L29:
420 # sha512-crypt.c:260: memcpy (cp, temp_result, cnt);
421 cmpl $8, %edx #, cnt
422 movl %edx, %eax # cnt, cnt
423 jnb .L31 #,
424 andl $4, %edx #, cnt
425 jne .L119 #,
426 testl %eax, %eax # cnt
427 je .L32 #,
428 movq -960(%rbp), %rsi # %sfp, tmp744
429 testb $2, %al #, cnt
430 movzbl (%rsi), %edx #, tmp490
431 movb %dl, (%rcx) # tmp490,* _215
432 jne .L120 #,
433 .L32:
434 # sha512-crypt.c:263: sha512_init_ctx (&alt_ctx, nss_alt_ctx);
435 movq -928(%rbp), %rdi # %sfp,
436 # sha512-crypt.c:266: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
437 xorl %ebx, %ebx # cnt
438 # sha512-crypt.c:263: sha512_init_ctx (&alt_ctx, nss_alt_ctx);
439 call __sha512_init_ctx@PLT #
440 # sha512-crypt.c:266: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
441 movq %r14, -952(%rbp) # tmp350, %sfp
442 movq %r12, -1008(%rbp) # tmp745, %sfp
443 movq %rbx, %r14 # cnt, cnt
444 movq -944(%rbp), %r13 # %sfp, salt
445 movq -920(%rbp), %r12 # %sfp, _6
446 movq -928(%rbp), %rbx # %sfp, tmp746
447 .p2align 4,,10
448 .p2align 3
449 .L37:
450 # sha512-crypt.c:267: sha512_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
451 movq %rbx, %rdx # tmp746,
452 movq %r12, %rsi # _6,
453 movq %r13, %rdi # salt,
454 call __sha512_process_bytes@PLT #
455 # sha512-crypt.c:266: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
456 movzbl -880(%rbp), %edx # alt_result, tmp516
457 addq $1, %r14 #, cnt
458 addq $16, %rdx #, tmp517
459 cmpq %r14, %rdx # cnt, tmp517
460 ja .L37 #,
461 # sha512-crypt.c:270: sha512_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
462 movq -960(%rbp), %rbx # %sfp, tmp744
463 movq -928(%rbp), %rdi # %sfp,
464 movq -952(%rbp), %r14 # %sfp, tmp350
465 movq -1008(%rbp), %r12 # %sfp, tmp745
466 movq %rbx, %rsi # tmp744,
467 call __sha512_finish_ctx@PLT #
468 # sha512-crypt.c:273: cp = s_bytes = alloca (salt_len);
469 movq -920(%rbp), %rdi # %sfp, _6
470 leaq 30(%rdi), %rax #, tmp522
471 movq %rdi, %rcx # _6, _6
472 andq $-16, %rax #, tmp526
473 subq %rax, %rsp # tmp526,
474 leaq 15(%rsp), %rax #, tmp528
475 andq $-16, %rax #, tmp528
476 # sha512-crypt.c:276: memcpy (cp, temp_result, cnt);
477 cmpl $8, %edi #, _6
478 # sha512-crypt.c:273: cp = s_bytes = alloca (salt_len);
479 movq %rax, %rsi # tmp528, tmp530
480 movq %rax, -952(%rbp) # tmp530, %sfp
481 movq %rax, -888(%rbp) # tmp530, cp
482 # sha512-crypt.c:276: memcpy (cp, temp_result, cnt);
483 movq %rbx, %rax # tmp744, tmp534
484 jnb .L121 #,
485 .L38:
486 xorl %edx, %edx # tmp540
487 testb $4, %cl #, _6
488 jne .L122 #,
489 testb $2, %cl #, _6
490 jne .L123 #,
491 .L42:
492 andl $1, %ecx #, _6
493 jne .L124 #,
494 .L43:
495 # sha512-crypt.c:280: for (cnt = 0; cnt < rounds; ++cnt)
496 xorl %ebx, %ebx # cnt
497 jmp .L50 #
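# sha512-crypt.c:263-280: alt_ctx is reinitialised once more and the salt
# added to it 16 + alt_result[0] times (loop .L37); its digest ("DS") goes
# back into temp_result, from which the size-dispatched inline memcpy above
# copies salt_len bytes into s_bytes.  With p_bytes and s_bytes ready, cnt is
# zeroed and control enters the main loop at .L50, which runs `rounds` times
# (default 5000, or the value parsed from "rounds=") and in each iteration
# rehashes alt_result together with p_bytes/s_bytes according to cnt's parity
# and its divisibility by 3 and 7 (sha512-crypt.c:283-306).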
498 .p2align 4,,10
499 .p2align 3
500 .L128:
501 # sha512-crypt.c:287: sha512_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
502 movq -912(%rbp), %rdi # %sfp,
503 movq %r14, %rsi # tmp350,
504 call __sha512_process_bytes@PLT #
505 .L45:
506 # sha512-crypt.c:292: if (cnt % 3 != 0)
507 movabsq $-6148914691236517205, %rax #, tmp804
508 mulq %rbx # cnt
509 shrq %rdx # tmp553
510 leaq (%rdx,%rdx,2), %rax #, tmp558
511 cmpq %rax, %rbx # tmp558, cnt
512 jne .L125 #,
513 .L46:
514 # sha512-crypt.c:296: if (cnt % 7 != 0)
515 movabsq $5270498306774157605, %rax #, tmp805
516 imulq %rbx # cnt
517 movq %rbx, %rax # cnt, tmp583
518 sarq $63, %rax #, tmp583
519 sarq %rdx # tmp582
520 subq %rax, %rdx # tmp583, tmp579
521 leaq 0(,%rdx,8), %rax #, tmp585
522 subq %rdx, %rax # tmp579, tmp586
523 cmpq %rax, %rbx # tmp586, cnt
524 jne .L126 #,
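# sha512-crypt.c:292-297: the cnt % 3 and cnt % 7 tests avoid a divide.  For
# % 3, mulq by 0xAAAAAAAAAAAAAAAB (printed as the signed constant
# -6148914691236517205) and a one-bit shift of the high half give cnt / 3,
# and the jne fires when 3 * (cnt / 3) differs from cnt.  For % 7, imulq by
# 5270498306774157605 (about 2^65 / 7) with the sign-correction shifts gives
# cnt / 7, compared the same way against 7 * (cnt / 7).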
525 .L47:
526 # sha512-crypt.c:300: if ((cnt & 1) != 0)
527 testq %r13, %r13 # _26
528 # sha512-crypt.c:301: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
529 movq %r12, %rdx # tmp745,
530 # sha512-crypt.c:300: if ((cnt & 1) != 0)
531 je .L48 #,
532 # sha512-crypt.c:301: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
533 movl $64, %esi #,
534 movq %r15, %rdi # tmp743,
535 call __sha512_process_bytes@PLT #
536 .L49:
537 # sha512-crypt.c:306: sha512_finish_ctx (&ctx, nss_ctx, alt_result);
538 movq %r15, %rsi # tmp743,
539 movq %r12, %rdi # tmp745,
540 # sha512-crypt.c:280: for (cnt = 0; cnt < rounds; ++cnt)
541 addq $1, %rbx #, cnt
542 # sha512-crypt.c:306: sha512_finish_ctx (&ctx, nss_ctx, alt_result);
543 call __sha512_finish_ctx@PLT #
544 # sha512-crypt.c:280: for (cnt = 0; cnt < rounds; ++cnt)
545 cmpq %rbx, -936(%rbp) # cnt, %sfp
546 je .L127 #,
547 .L50:
548 # sha512-crypt.c:283: sha512_init_ctx (&ctx, nss_ctx);
549 movq %r12, %rdi # tmp745,
550 # sha512-crypt.c:286: if ((cnt & 1) != 0)
551 movq %rbx, %r13 # cnt, _26
552 # sha512-crypt.c:283: sha512_init_ctx (&ctx, nss_ctx);
553 call __sha512_init_ctx@PLT #
554 # sha512-crypt.c:286: if ((cnt & 1) != 0)
555 andl $1, %r13d #, _26
556 # sha512-crypt.c:287: sha512_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
557 movq %r12, %rdx # tmp745,
558 # sha512-crypt.c:286: if ((cnt & 1) != 0)
559 jne .L128 #,
560 # sha512-crypt.c:289: sha512_process_bytes (alt_result, 64, &ctx, nss_ctx);
561 movl $64, %esi #,
562 movq %r15, %rdi # tmp743,
563 call __sha512_process_bytes@PLT #
564 jmp .L45 #
565 .p2align 4,,10
566 .p2align 3
567 .L31:
568 # sha512-crypt.c:260: memcpy (cp, temp_result, cnt);
569 movq -816(%rbp), %rax #, tmp499
570 movq %rax, (%rcx) # tmp499,* _215
571 movq -960(%rbp), %rdi # %sfp, tmp744
572 movl %edx, %eax # cnt, cnt
573 movq -8(%rdi,%rax), %rsi #, tmp506
574 movq %rsi, -8(%rcx,%rax) # tmp506,
575 leaq 8(%rcx), %rsi #, tmp507
576 andq $-8, %rsi #, tmp507
577 subq %rsi, %rcx # tmp507, _215
578 leal (%rdx,%rcx), %eax #, cnt
579 subq %rcx, %rdi # _215, tmp480
580 andl $-8, %eax #, cnt
581 cmpl $8, %eax #, cnt
582 jb .L32 #,
583 andl $-8, %eax #, tmp509
584 xorl %edx, %edx # tmp508
585 .L35:
586 movl %edx, %ecx # tmp508, tmp510
587 addl $8, %edx #, tmp508
588 movq (%rdi,%rcx), %r8 #, tmp511
589 cmpl %eax, %edx # tmp509, tmp508
590 movq %r8, (%rsi,%rcx) # tmp511,
591 jb .L35 #,
592 jmp .L32 #
593 .p2align 4,,10
594 .p2align 3
595 .L48:
596 # sha512-crypt.c:303: sha512_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
597 movq -912(%rbp), %rdi # %sfp,
598 movq %r14, %rsi # tmp350,
599 call __sha512_process_bytes@PLT #
600 jmp .L49 #
601 .p2align 4,,10
602 .p2align 3
603 .L126:
604 # sha512-crypt.c:297: sha512_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
605 movq -912(%rbp), %rdi # %sfp,
606 movq %r12, %rdx # tmp745,
607 movq %r14, %rsi # tmp350,
608 call __sha512_process_bytes@PLT #
609 jmp .L47 #
610 .p2align 4,,10
611 .p2align 3
612 .L125:
613 # sha512-crypt.c:293: sha512_process_bytes (s_bytes, salt_len, &ctx, nss_ctx);
614 movq -920(%rbp), %rsi # %sfp,
615 movq -952(%rbp), %rdi # %sfp,
616 movq %r12, %rdx # tmp745,
617 call __sha512_process_bytes@PLT #
618 jmp .L46 #
619 .p2align 4,,10
620 .p2align 3
621 .L127:
622 # sha512-crypt.c:316: cp = __stpncpy (buffer, sha512_salt_prefix, MAX (0, buflen));
623 movl -900(%rbp), %edx # buflen,
624 xorl %ebx, %ebx # tmp595
625 movq -968(%rbp), %rdi # %sfp,
626 leaq sha512_salt_prefix(%rip), %rsi #,
627 testl %edx, %edx #
628 movl %ebx, %edx # tmp595, tmp594
629 cmovns -900(%rbp), %edx # buflen,, tmp594
630 movslq %edx, %rdx # tmp594, tmp596
631 call __stpncpy@PLT #
632 movq %rax, %rdi #, _32
633 movq %rax, -888(%rbp) # _32, cp
634 # sha512-crypt.c:317: buflen -= sizeof (sha512_salt_prefix) - 1;
635 movl -900(%rbp), %eax # buflen, tmp868
636 # sha512-crypt.c:319: if (rounds_custom)
637 cmpb $0, -901(%rbp) #, %sfp
638 # sha512-crypt.c:317: buflen -= sizeof (sha512_salt_prefix) - 1;
639 leal -3(%rax), %edx #, _36
640 movl %edx, -900(%rbp) # _36, buflen
641 # sha512-crypt.c:319: if (rounds_custom)
642 jne .L129 #,
643 .L51:
644 # sha512-crypt.c:327: cp = __stpncpy (cp, salt, MIN ((size_t) MAX (0, buflen), salt_len));
645 movq -920(%rbp), %r13 # %sfp, _6
646 xorl %ebx, %ebx #
647 testl %edx, %edx # _36
648 cmovs %ebx, %edx # _36,, tmp606, tmp605
649 movq -944(%rbp), %rsi # %sfp,
650 movslq %edx, %rdx # tmp605, tmp607
651 cmpq %r13, %rdx # _6, tmp607
652 cmova %r13, %rdx # tmp607,, _6, tmp604
653 call __stpncpy@PLT #
654 # sha512-crypt.c:328: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
655 movslq -900(%rbp), %rdx # buflen,
656 # sha512-crypt.c:327: cp = __stpncpy (cp, salt, MIN ((size_t) MAX (0, buflen), salt_len));
657 movq %rax, -888(%rbp) # _49, cp
658 # sha512-crypt.c:328: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
659 testl %edx, %edx # buflen.23_50
660 cmovns %rdx, %rbx #,,
661 cmpq %r13, %rbx # _6, tmp611
662 cmova %r13, %rbx # tmp611,, _6, tmp608
663 subl %ebx, %edx # tmp608, _57
664 # sha512-crypt.c:330: if (buflen > 0)
665 testl %edx, %edx # _57
666 # sha512-crypt.c:328: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
667 movl %edx, -900(%rbp) # _57, buflen
668 # sha512-crypt.c:330: if (buflen > 0)
669 jle .L52 #,
670 # sha512-crypt.c:332: *cp++ = '$';
671 leaq 1(%rax), %rdx #, tmp612
672 movq %rdx, -888(%rbp) # tmp612, cp
673 movb $36, (%rax) #, *_49
674 # sha512-crypt.c:333: --buflen;
675 subl $1, -900(%rbp) #, buflen
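# sha512-crypt.c:316-333: the result string is assembled in buffer: the "$6$"
# prefix, an optional "rounds=<N>$" written by __snprintf at .L129 when a
# custom round count was given, then up to salt_len bytes of the salt and a
# closing '$', with buflen tracked throughout.  The 22 __b64_from_24bit calls
# that follow emit the 64-byte alt_result as 86 characters of the crypt
# base-64 alphabet, taking the bytes three at a time in the scheme's permuted
# order ((0,21,42), (22,43,1), (44,2,23), ...) and byte 63 alone at the end.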
676 .L52:
677 # sha512-crypt.c:336: __b64_from_24bit (&cp, &buflen,
678 movzbl -859(%rbp), %ecx # alt_result, alt_result
679 movzbl -880(%rbp), %edx # alt_result, alt_result
680 leaq -900(%rbp), %r13 #, tmp616
681 movzbl -838(%rbp), %r8d # alt_result,
682 leaq -888(%rbp), %rbx #, tmp617
683 movl $4, %r9d #,
684 movq %r13, %rsi # tmp616,
685 movq %rbx, %rdi # tmp617,
686 call __b64_from_24bit@PLT #
687 # sha512-crypt.c:338: __b64_from_24bit (&cp, &buflen,
688 movzbl -837(%rbp), %ecx # alt_result, alt_result
689 movzbl -858(%rbp), %edx # alt_result, alt_result
690 movl $4, %r9d #,
691 movzbl -879(%rbp), %r8d # alt_result,
692 movq %r13, %rsi # tmp616,
693 movq %rbx, %rdi # tmp617,
694 call __b64_from_24bit@PLT #
695 # sha512-crypt.c:340: __b64_from_24bit (&cp, &buflen,
696 movzbl -878(%rbp), %ecx # alt_result, alt_result
697 movzbl -836(%rbp), %edx # alt_result, alt_result
698 movl $4, %r9d #,
699 movzbl -857(%rbp), %r8d # alt_result,
700 movq %r13, %rsi # tmp616,
701 movq %rbx, %rdi # tmp617,
702 call __b64_from_24bit@PLT #
703 # sha512-crypt.c:342: __b64_from_24bit (&cp, &buflen,
704 movzbl -856(%rbp), %ecx # alt_result, alt_result
705 movzbl -877(%rbp), %edx # alt_result, alt_result
706 movl $4, %r9d #,
707 movzbl -835(%rbp), %r8d # alt_result,
708 movq %r13, %rsi # tmp616,
709 movq %rbx, %rdi # tmp617,
710 call __b64_from_24bit@PLT #
711 # sha512-crypt.c:344: __b64_from_24bit (&cp, &buflen,
712 movzbl -834(%rbp), %ecx # alt_result, alt_result
713 movzbl -855(%rbp), %edx # alt_result, alt_result
714 movl $4, %r9d #,
715 movzbl -876(%rbp), %r8d # alt_result,
716 movq %r13, %rsi # tmp616,
717 movq %rbx, %rdi # tmp617,
718 call __b64_from_24bit@PLT #
719 # sha512-crypt.c:346: __b64_from_24bit (&cp, &buflen,
720 movzbl -875(%rbp), %ecx # alt_result, alt_result
721 movzbl -833(%rbp), %edx # alt_result, alt_result
722 movl $4, %r9d #,
723 movzbl -854(%rbp), %r8d # alt_result,
724 movq %r13, %rsi # tmp616,
725 movq %rbx, %rdi # tmp617,
726 call __b64_from_24bit@PLT #
727 # sha512-crypt.c:348: __b64_from_24bit (&cp, &buflen,
728 movzbl -853(%rbp), %ecx # alt_result, alt_result
729 movzbl -874(%rbp), %edx # alt_result, alt_result
730 movl $4, %r9d #,
731 movzbl -832(%rbp), %r8d # alt_result,
732 movq %r13, %rsi # tmp616,
733 movq %rbx, %rdi # tmp617,
734 call __b64_from_24bit@PLT #
735 # sha512-crypt.c:350: __b64_from_24bit (&cp, &buflen,
736 movzbl -831(%rbp), %ecx # alt_result, alt_result
737 movzbl -852(%rbp), %edx # alt_result, alt_result
738 movl $4, %r9d #,
739 movzbl -873(%rbp), %r8d # alt_result,
740 movq %r13, %rsi # tmp616,
741 movq %rbx, %rdi # tmp617,
742 call __b64_from_24bit@PLT #
743 # sha512-crypt.c:352: __b64_from_24bit (&cp, &buflen,
744 movzbl -872(%rbp), %ecx # alt_result, alt_result
745 movzbl -830(%rbp), %edx # alt_result, alt_result
746 movl $4, %r9d #,
747 movzbl -851(%rbp), %r8d # alt_result,
748 movq %r13, %rsi # tmp616,
749 movq %rbx, %rdi # tmp617,
750 call __b64_from_24bit@PLT #
751 # sha512-crypt.c:354: __b64_from_24bit (&cp, &buflen,
752 movzbl -850(%rbp), %ecx # alt_result, alt_result
753 movzbl -871(%rbp), %edx # alt_result, alt_result
754 movl $4, %r9d #,
755 movzbl -829(%rbp), %r8d # alt_result,
756 movq %r13, %rsi # tmp616,
757 movq %rbx, %rdi # tmp617,
758 call __b64_from_24bit@PLT #
759 # sha512-crypt.c:356: __b64_from_24bit (&cp, &buflen,
760 movzbl -828(%rbp), %ecx # alt_result, alt_result
761 movzbl -849(%rbp), %edx # alt_result, alt_result
762 movl $4, %r9d #,
763 movzbl -870(%rbp), %r8d # alt_result,
764 movq %r13, %rsi # tmp616,
765 movq %rbx, %rdi # tmp617,
766 call __b64_from_24bit@PLT #
767 # sha512-crypt.c:358: __b64_from_24bit (&cp, &buflen,
768 movzbl -869(%rbp), %ecx # alt_result, alt_result
769 movzbl -827(%rbp), %edx # alt_result, alt_result
770 movl $4, %r9d #,
771 movzbl -848(%rbp), %r8d # alt_result,
772 movq %r13, %rsi # tmp616,
773 movq %rbx, %rdi # tmp617,
774 call __b64_from_24bit@PLT #
775 # sha512-crypt.c:360: __b64_from_24bit (&cp, &buflen,
776 movzbl -847(%rbp), %ecx # alt_result, alt_result
777 movzbl -868(%rbp), %edx # alt_result, alt_result
778 movl $4, %r9d #,
779 movzbl -826(%rbp), %r8d # alt_result,
780 movq %r13, %rsi # tmp616,
781 movq %rbx, %rdi # tmp617,
782 call __b64_from_24bit@PLT #
783 # sha512-crypt.c:362: __b64_from_24bit (&cp, &buflen,
784 movzbl -825(%rbp), %ecx # alt_result, alt_result
785 movzbl -846(%rbp), %edx # alt_result, alt_result
786 movl $4, %r9d #,
787 movzbl -867(%rbp), %r8d # alt_result,
788 movq %r13, %rsi # tmp616,
789 movq %rbx, %rdi # tmp617,
790 call __b64_from_24bit@PLT #
791 # sha512-crypt.c:364: __b64_from_24bit (&cp, &buflen,
792 movzbl -866(%rbp), %ecx # alt_result, alt_result
793 movzbl -824(%rbp), %edx # alt_result, alt_result
794 movl $4, %r9d #,
795 movzbl -845(%rbp), %r8d # alt_result,
796 movq %r13, %rsi # tmp616,
797 movq %rbx, %rdi # tmp617,
798 call __b64_from_24bit@PLT #
799 # sha512-crypt.c:366: __b64_from_24bit (&cp, &buflen,
800 movzbl -844(%rbp), %ecx # alt_result, alt_result
801 movzbl -865(%rbp), %edx # alt_result, alt_result
802 movl $4, %r9d #,
803 movzbl -823(%rbp), %r8d # alt_result,
804 movq %r13, %rsi # tmp616,
805 movq %rbx, %rdi # tmp617,
806 call __b64_from_24bit@PLT #
807 # sha512-crypt.c:368: __b64_from_24bit (&cp, &buflen,
808 movzbl -822(%rbp), %ecx # alt_result, alt_result
809 movzbl -843(%rbp), %edx # alt_result, alt_result
810 movl $4, %r9d #,
811 movzbl -864(%rbp), %r8d # alt_result,
812 movq %r13, %rsi # tmp616,
813 movq %rbx, %rdi # tmp617,
814 call __b64_from_24bit@PLT #
815 # sha512-crypt.c:370: __b64_from_24bit (&cp, &buflen,
816 movzbl -863(%rbp), %ecx # alt_result, alt_result
817 movzbl -821(%rbp), %edx # alt_result, alt_result
818 movl $4, %r9d #,
819 movzbl -842(%rbp), %r8d # alt_result,
820 movq %r13, %rsi # tmp616,
821 movq %rbx, %rdi # tmp617,
822 call __b64_from_24bit@PLT #
823 # sha512-crypt.c:372: __b64_from_24bit (&cp, &buflen,
824 movzbl -841(%rbp), %ecx # alt_result, alt_result
825 movzbl -862(%rbp), %edx # alt_result, alt_result
826 movl $4, %r9d #,
827 movzbl -820(%rbp), %r8d # alt_result,
828 movq %r13, %rsi # tmp616,
829 movq %rbx, %rdi # tmp617,
830 call __b64_from_24bit@PLT #
831 # sha512-crypt.c:374: __b64_from_24bit (&cp, &buflen,
832 movzbl -819(%rbp), %ecx # alt_result, alt_result
833 movzbl -840(%rbp), %edx # alt_result, alt_result
834 movl $4, %r9d #,
835 movzbl -861(%rbp), %r8d # alt_result,
836 movq %r13, %rsi # tmp616,
837 movq %rbx, %rdi # tmp617,
838 call __b64_from_24bit@PLT #
839 # sha512-crypt.c:376: __b64_from_24bit (&cp, &buflen,
840 movzbl -860(%rbp), %ecx # alt_result, alt_result
841 movzbl -818(%rbp), %edx # alt_result, alt_result
842 movl $4, %r9d #,
843 movzbl -839(%rbp), %r8d # alt_result,
844 movq %r13, %rsi # tmp616,
845 movq %rbx, %rdi # tmp617,
846 call __b64_from_24bit@PLT #
847 # sha512-crypt.c:378: __b64_from_24bit (&cp, &buflen,
848 movzbl -817(%rbp), %r8d # alt_result,
849 xorl %ecx, %ecx #
850 xorl %edx, %edx #
851 movl $2, %r9d #,
852 movq %r13, %rsi # tmp616,
853 movq %rbx, %rdi # tmp617,
854 call __b64_from_24bit@PLT #
855 # sha512-crypt.c:381: if (buflen <= 0)
856 movl -900(%rbp), %eax # buflen,
857 testl %eax, %eax #
858 jle .L130 #,
859 # sha512-crypt.c:387: *cp = '\0'; /* Terminate the string. */
860 movq -888(%rbp), %rax # cp, cp.31_190
861 movq -968(%rbp), %rbx # %sfp, <retval>
862 movb $0, (%rax) #, *cp.31_190
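# sha512-crypt.c:381-408: if buflen was exhausted, .L130 sets errno to ERANGE
# and NULL is returned; otherwise the string is NUL-terminated here.  Before
# returning, all sensitive intermediates are wiped: ctx, alt_ctx, temp_result,
# p_bytes, s_bytes and any copied_key/copied_salt buffers go through
# explicit_bzero (emitted as __explicit_bzero_chk), and the heap copies
# free_key and free_pbytes are freed.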
863 .L54:
864 # sha512-crypt.c:394: __sha512_init_ctx (&ctx);
865 movq %r12, %rdi # tmp745,
866 call __sha512_init_ctx@PLT #
867 # sha512-crypt.c:395: __sha512_finish_ctx (&ctx, alt_result);
868 movq %r15, %rsi # tmp743,
869 movq %r12, %rdi # tmp745,
870 call __sha512_finish_ctx@PLT #
871 # sha512-crypt.c:396: explicit_bzero (&ctx, sizeof (ctx));
872 movl $352, %edx #,
873 movl $352, %esi #,
874 movq %r12, %rdi # tmp745,
875 call __explicit_bzero_chk@PLT #
876 # sha512-crypt.c:397: explicit_bzero (&alt_ctx, sizeof (alt_ctx));
877 movq -928(%rbp), %rdi # %sfp,
878 movl $352, %edx #,
879 movl $352, %esi #,
880 call __explicit_bzero_chk@PLT #
881 # sha512-crypt.c:399: explicit_bzero (temp_result, sizeof (temp_result));
882 movq -960(%rbp), %rdi # %sfp,
883 movl $64, %edx #,
884 movl $64, %esi #,
885 call __explicit_bzero_chk@PLT #
886 # sha512-crypt.c:400: explicit_bzero (p_bytes, key_len);
887 movq -912(%rbp), %rdi # %sfp,
888 movq $-1, %rdx #,
889 movq %r14, %rsi # tmp350,
890 call __explicit_bzero_chk@PLT #
891 # sha512-crypt.c:401: explicit_bzero (s_bytes, salt_len);
892 movq -920(%rbp), %rsi # %sfp,
893 movq -952(%rbp), %rdi # %sfp,
894 movq $-1, %rdx #,
895 call __explicit_bzero_chk@PLT #
896 # sha512-crypt.c:402: if (copied_key != NULL)
897 movq -992(%rbp), %rax # %sfp, copied_key
898 testq %rax, %rax # copied_key
899 je .L55 #,
900 # sha512-crypt.c:403: explicit_bzero (copied_key, key_len);
901 movq $-1, %rdx #,
902 movq %r14, %rsi # tmp350,
903 movq %rax, %rdi # copied_key,
904 call __explicit_bzero_chk@PLT #
905 .L55:
906 # sha512-crypt.c:404: if (copied_salt != NULL)
907 movq -976(%rbp), %rax # %sfp, copied_salt
908 testq %rax, %rax # copied_salt
909 je .L56 #,
910 # sha512-crypt.c:405: explicit_bzero (copied_salt, salt_len);
911 movq -920(%rbp), %rsi # %sfp,
912 movq $-1, %rdx #,
913 movq %rax, %rdi # copied_salt,
914 call __explicit_bzero_chk@PLT #
915 .L56:
916 # sha512-crypt.c:407: free (free_key);
917 movq -984(%rbp), %rdi # %sfp,
918 call free@PLT #
919 # sha512-crypt.c:408: free (free_pbytes);
920 movq -1000(%rbp), %rdi # %sfp,
921 call free@PLT #
922 .L1:
923 # sha512-crypt.c:410: }
924 leaq -40(%rbp), %rsp #,
925 movq %rbx, %rax # <retval>,
926 popq %rbx #
927 popq %r12 #
928 popq %r13 #
929 popq %r14 #
930 popq %r15 #
931 popq %rbp #
932 .cfi_remember_state
933 .cfi_def_cfa 7, 8
934 ret
935 .p2align 4,,10
936 .p2align 3
937 .L122:
938 .cfi_restore_state
939 # sha512-crypt.c:276: memcpy (cp, temp_result, cnt);
940 movl (%rax), %edx #, tmp542
941 testb $2, %cl #, _6
942 movl %edx, (%rsi) # tmp542,* s_bytes
943 movl $4, %edx #, tmp540
944 je .L42 #,
945 jmp .L123 #
946 .p2align 4,,10
947 .p2align 3
948 .L124:
949 movzbl (%rax,%rdx), %eax #, tmp548
950 movb %al, (%rsi,%rdx) # tmp548,
951 jmp .L43 #
952 .p2align 4,,10
953 .p2align 3
954 .L123:
955 movzwl (%rax,%rdx), %edi #, tmp545
956 movw %di, (%rsi,%rdx) # tmp545,
957 addq $2, %rdx #, tmp540
958 andl $1, %ecx #, _6
959 je .L43 #,
960 jmp .L124 #
961 .p2align 4,,10
962 .p2align 3
963 .L121:
964 movl %edi, %esi # _6, tmp536
965 xorl %eax, %eax # tmp535
966 movq %rbx, %r8 # tmp744, tmp744
967 andl $-8, %esi #, tmp536
968 .L39:
969 movl %eax, %edx # tmp535, tmp537
970 movq -952(%rbp), %rbx # %sfp, tmp530
971 addl $8, %eax #,
972 movq (%r8,%rdx), %rdi # MEM[(void *)&temp_result], tmp538
973 cmpl %esi, %eax # tmp536, tmp535
974 movq %rdi, (%rbx,%rdx) # tmp538, MEM[(void *)s_bytes_294]
975 jb .L39 #,
976 movq %rbx, %rsi # tmp530, tmp530
977 addq %rax, %rsi # tmp539, s_bytes
978 addq -960(%rbp), %rax # %sfp, tmp534
979 jmp .L38 #
980 .p2align 4,,10
981 .p2align 3
982 .L130:
983 # sha512-crypt.c:383: __set_errno (ERANGE);
984 movq errno@gottpoff(%rip), %rax #, tmp721
985 # sha512-crypt.c:384: buffer = NULL;
986 xorl %ebx, %ebx # <retval>
987 # sha512-crypt.c:383: __set_errno (ERANGE);
988 movl $34, %fs:(%rax) #, errno
989 jmp .L54 #
990 .p2align 4,,10
991 .p2align 3
992 .L129:
993 # sha512-crypt.c:321: int n = __snprintf (cp, MAX (0, buflen), "%s%zu$",
994 testl %edx, %edx # _36
995 movq -936(%rbp), %r8 # %sfp,
996 leaq sha512_rounds_prefix(%rip), %rcx #,
997 cmovs %ebx, %edx # _36,, tmp595, tmp598
998 xorl %eax, %eax #
999 movslq %edx, %rsi # tmp598, tmp600
1000 leaq .LC1(%rip), %rdx #,
1001 call __snprintf@PLT #
1002 # sha512-crypt.c:324: buflen -= n;
1003 movl -900(%rbp), %edx # buflen, _36
1004 # sha512-crypt.c:323: cp += n;
1005 movslq %eax, %rdi # n, n
1006 addq -888(%rbp), %rdi # cp, _32
1007 # sha512-crypt.c:324: buflen -= n;
1008 subl %eax, %edx # n, _36
1009 # sha512-crypt.c:323: cp += n;
1010 movq %rdi, -888(%rbp) # _32, cp
1011 # sha512-crypt.c:324: buflen -= n;
1012 movl %edx, -900(%rbp) # _36, buflen
1013 jmp .L51 #
1014 .p2align 4,,10
1015 .p2align 3
1016 .L113:
1017 # sha512-crypt.c:132: const char *num = salt + sizeof (sha512_rounds_prefix) - 1;
1018 movq -944(%rbp), %rax # %sfp, salt
1019 # sha512-crypt.c:134: unsigned long int srounds = strtoul (num, &endp, 10);
1020 leaq -400(%rbp), %rsi #, tmp345
1021 movl $10, %edx #,
1022 # sha512-crypt.c:132: const char *num = salt + sizeof (sha512_rounds_prefix) - 1;
1023 leaq 7(%rax), %rdi #, num
1024 # sha512-crypt.c:134: unsigned long int srounds = strtoul (num, &endp, 10);
1025 call strtoul@PLT #
1026 # sha512-crypt.c:135: if (*endp == '$')
1027 movq -400(%rbp), %rdx # endp, endp.0_3
1028 cmpb $36, (%rdx) #, *endp.0_3
1029 jne .L3 #,
1030 # sha512-crypt.c:138: rounds = MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX));
1031 cmpq $999999999, %rax #, srounds
1032 # sha512-crypt.c:137: salt = endp + 1;
1033 leaq 1(%rdx), %rcx #, salt
1034 # sha512-crypt.c:138: rounds = MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX));
1035 movl $999999999, %edx #, tmp347
1036 cmovbe %rax, %rdx # srounds,, tmp347
1037 movl $1000, %eax #, tmp348
1038 # sha512-crypt.c:139: rounds_custom = true;
1039 movb $1, -901(%rbp) #, %sfp
1040 cmpq $1000, %rdx #, rounds
1041 # sha512-crypt.c:137: salt = endp + 1;
1042 movq %rcx, -944(%rbp) # salt, %sfp
1043 cmovnb %rdx, %rax # rounds,, tmp348
1044 movq %rax, -936(%rbp) # tmp348, %sfp
1045 jmp .L3 #
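# sha512-crypt.c:132-139 (.L113): parse the number after "rounds=" with
# strtoul; if it is terminated by '$', the salt pointer is advanced past it,
# rounds_custom is set, and rounds is clamped to
# MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX)), i.e. to the range
# 1000 ... 999999999 visible in the immediates above.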
1046 .p2align 4,,10
1047 .p2align 3
1048 .L115:
1049 # sha512-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint64_t));
1050 movq -920(%rbp), %rsi # %sfp, _6
1051 leaq 38(%rsi), %rax #, tmp381
1052 # sha512-crypt.c:169: salt = copied_salt =
1053 movl %esi, %edx # _6,
1054 # sha512-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint64_t));
1055 andq $-16, %rax #, tmp385
1056 subq %rax, %rsp # tmp385,
1057 leaq 15(%rsp), %rax #, tmp387
1058 andq $-16, %rax #, tmp389
1059 # sha512-crypt.c:169: salt = copied_salt =
1060 cmpl $8, %esi #, _6
1061 # sha512-crypt.c:171: - (tmp - (char *) 0) % __alignof__ (uint64_t),
1062 leaq 8(%rax), %rcx #, tmp390
1063 # sha512-crypt.c:169: salt = copied_salt =
1064 jnb .L11 #,
1065 andl $4, %esi #, _6
1066 jne .L131 #,
1067 testl %edx, %edx # _6
1068 je .L12 #,
1069 movq -944(%rbp), %rsi # %sfp, salt
1070 testb $2, %dl #, _6
1071 movzbl (%rsi), %esi #* salt, tmp403
1072 movb %sil, 8(%rax) # tmp403,
1073 jne .L132 #,
1074 .L12:
1075 movq %rcx, -944(%rbp) # salt, %sfp
1076 movq %rcx, -976(%rbp) # salt, %sfp
1077 jmp .L10 #
1078 .p2align 4,,10
1079 .p2align 3
1080 .L11:
1081 movq -920(%rbp), %rsi # %sfp, _6
1082 movq -944(%rbp), %r8 # %sfp, salt
1083 movl %esi, %eax # _6, _6
1084 movq -8(%r8,%rax), %rdx #, tmp418
1085 movq %rdx, -8(%rcx,%rax) # tmp418,
1086 leal -1(%rsi), %edx #, _6
1087 cmpl $8, %edx #, _6
1088 jb .L12 #,
1089 andl $-8, %edx #, tmp420
1090 xorl %eax, %eax # tmp419
1091 .L15:
1092 movl %eax, %esi # tmp419, tmp421
1093 addl $8, %eax #, tmp419
1094 movq (%r8,%rsi), %rdi #, tmp422
1095 cmpl %edx, %eax # tmp420, tmp419
1096 movq %rdi, (%rcx,%rsi) # tmp422,
1097 jb .L15 #,
1098 jmp .L12 #
1099 .L119:
1100 # sha512-crypt.c:260: memcpy (cp, temp_result, cnt);
1101 movq -960(%rbp), %rsi # %sfp, tmp744
1102 movl (%rsi), %edx #, tmp482
1103 movl %edx, (%rcx) # tmp482,* _215
1104 movl %eax, %edx # cnt, cnt
1105 movl -4(%rsi,%rdx), %eax #, tmp489
1106 movl %eax, -4(%rcx,%rdx) # tmp489,
1107 jmp .L32 #
1108 .p2align 4,,10
1109 .p2align 3
1110 .L17:
1111 # sha512-crypt.c:222: sha512_process_bytes (alt_result, cnt, &ctx, nss_ctx);
1112 movq %r12, %rdx # tmp745,
1113 movq %r14, %rsi # tmp350,
1114 movq %r15, %rdi # tmp743,
1115 call __sha512_process_bytes@PLT #
1116 # sha512-crypt.c:226: for (cnt = key_len; cnt > 0; cnt >>= 1)
1117 testq %r14, %r14 # tmp350
1118 jne .L19 #,
1119 # sha512-crypt.c:233: sha512_finish_ctx (&ctx, nss_ctx, alt_result);
1120 movq %r12, %rdi # tmp745,
1121 movq %r15, %rsi # tmp743,
1122 call __sha512_finish_ctx@PLT #
1123 # sha512-crypt.c:236: sha512_init_ctx (&alt_ctx, nss_alt_ctx);
1124 movq -928(%rbp), %rdi # %sfp,
1125 call __sha512_init_ctx@PLT #
1126 jmp .L57 #
1127 .p2align 4,,10
1128 .p2align 3
1129 .L63:
1130 # sha512-crypt.c:258: for (cnt = key_len; cnt >= 64; cnt -= 64)
1131 movq -912(%rbp), %rcx # %sfp, _215
1132 movq %r14, %rdx # tmp350, cnt
1133 jmp .L29 #
1134 .L120:
1135 # sha512-crypt.c:260: memcpy (cp, temp_result, cnt);
1136 movl %eax, %edx # cnt, cnt
1137 movq -960(%rbp), %rax # %sfp, tmp744
1138 movzwl -2(%rax,%rdx), %eax #, tmp498
1139 movw %ax, -2(%rcx,%rdx) # tmp498,
1140 jmp .L32 #
1141 .L118:
1142 # sha512-crypt.c:250: free_pbytes = cp = p_bytes = (char *)malloc (key_len);
1143 movq %r14, %rdi # tmp350,
1144 call malloc@PLT #
1145 # sha512-crypt.c:251: if (free_pbytes == NULL)
1146 testq %rax, %rax # p_bytes
1147 # sha512-crypt.c:250: free_pbytes = cp = p_bytes = (char *)malloc (key_len);
1148 movq %rax, -912(%rbp) # p_bytes, %sfp
1149 movq %rax, -888(%rbp) # p_bytes, cp
1150 # sha512-crypt.c:251: if (free_pbytes == NULL)
1151 je .L27 #,
1152 movq %rax, -1000(%rbp) # p_bytes, %sfp
1153 jmp .L28 #
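# sha512-crypt.c:250-254 (.L118): when the alloca limit would be exceeded,
# p_bytes is obtained with malloc and remembered in free_pbytes; if the
# allocation fails, .L27 frees free_key and the function returns NULL.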
1154 .L131:
1155 # sha512-crypt.c:169: salt = copied_salt =
1156 movq -944(%rbp), %rdi # %sfp, salt
1157 movl (%rdi), %esi #* salt, tmp395
1158 movl %esi, 8(%rax) # tmp395,
1159 movl -4(%rdi,%rdx), %eax #, tmp402
1160 movl %eax, -4(%rcx,%rdx) # tmp402,
1161 jmp .L12 #
1162 .L132:
1163 movq -944(%rbp), %rax # %sfp, salt
1164 movzwl -2(%rax,%rdx), %eax #, tmp411
1165 movw %ax, -2(%rcx,%rdx) # tmp411,
1166 jmp .L12 #
1167 .L114:
1168 # sha512-crypt.c:154: free_key = tmp = (char *) malloc (key_len + __alignof__ (uint64_t));
1169 movq %r12, %rdi # _9,
1170 call malloc@PLT #
1171 # sha512-crypt.c:155: if (tmp == NULL)
1172 testq %rax, %rax # free_key
1173 # sha512-crypt.c:154: free_key = tmp = (char *) malloc (key_len + __alignof__ (uint64_t));
1174 movq %rax, %rdi #, free_key
1175 movq %rax, -984(%rbp) # free_key, %sfp
1176 # sha512-crypt.c:155: if (tmp == NULL)
1177 je .L61 #,
1178 movq %r14, -912(%rbp) # tmp350, %sfp
1179 jmp .L9 #
1180 .L27:
1181 # sha512-crypt.c:253: free (free_key);
1182 movq -984(%rbp), %rdi # %sfp,
1183 # sha512-crypt.c:254: return NULL;
1184 xorl %ebx, %ebx # <retval>
1185 # sha512-crypt.c:253: free (free_key);
1186 call free@PLT #
1187 # sha512-crypt.c:254: return NULL;
1188 jmp .L1 #
1189 .L61:
1190 # sha512-crypt.c:156: return NULL;
1191 xorl %ebx, %ebx # <retval>
1192 jmp .L1 #
1193 .cfi_endproc
1194 .LFE41:
1195 .size __sha512_crypt_r, .-__sha512_crypt_r
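# __sha512_crypt is the non-reentrant wrapper (sha512-crypt.c:421-442): it
# keeps a static `buffer' (placed in the __libc_freeres_ptrs section so it is
# released by __libc_freeres) together with its size in `buflen', grows the
# buffer with realloc to strlen (salt) + 109 bytes when needed (prefix,
# rounds specification, salt, '$' separators, the 86 hash characters and the
# terminating NUL), and tail-calls __sha512_crypt_r.  If realloc fails it
# returns NULL without touching the old buffer.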
1196 .p2align 4,,15
1197 .globl __sha512_crypt
1198 .type __sha512_crypt, @function
1199 __sha512_crypt:
1200 .LFB42:
1201 .cfi_startproc
1202 pushq %r12 #
1203 .cfi_def_cfa_offset 16
1204 .cfi_offset 12, -16
1205 pushq %rbp #
1206 .cfi_def_cfa_offset 24
1207 .cfi_offset 6, -24
1208 movq %rdi, %r12 # key, key
1209 pushq %rbx #
1210 .cfi_def_cfa_offset 32
1211 .cfi_offset 3, -32
1212 # sha512-crypt.c:429: + strlen (salt) + 1 + 86 + 1);
1213 movq %rsi, %rdi # salt,
1214 # sha512-crypt.c:421: {
1215 movq %rsi, %rbp # salt, salt
1216 # sha512-crypt.c:429: + strlen (salt) + 1 + 86 + 1);
1217 call strlen@PLT #
1218 # sha512-crypt.c:431: if (buflen < needed)
1219 movl buflen.5421(%rip), %ecx # buflen, buflen.33_4
1220 # sha512-crypt.c:429: + strlen (salt) + 1 + 86 + 1);
1221 leal 109(%rax), %ebx #, needed
1222 movq buffer(%rip), %rdx # buffer, <retval>
1223 # sha512-crypt.c:431: if (buflen < needed)
1224 cmpl %ebx, %ecx # needed, buflen.33_4
1225 jge .L134 #,
1226 # sha512-crypt.c:433: char *new_buffer = (char *) realloc (buffer, needed);
1227 movq %rdx, %rdi # <retval>,
1228 movslq %ebx, %rsi # needed, needed
1229 call realloc@PLT #
1230 # sha512-crypt.c:434: if (new_buffer == NULL)
1231 testq %rax, %rax # <retval>
1232 # sha512-crypt.c:433: char *new_buffer = (char *) realloc (buffer, needed);
1233 movq %rax, %rdx #, <retval>
1234 # sha512-crypt.c:434: if (new_buffer == NULL)
1235 je .L133 #,
1236 # sha512-crypt.c:437: buffer = new_buffer;
1237 movq %rax, buffer(%rip) # <retval>, buffer
1238 # sha512-crypt.c:438: buflen = needed;
1239 movl %ebx, buflen.5421(%rip) # needed, buflen
1240 movl %ebx, %ecx # needed, buflen.33_4
1241 .L134:
1242 # sha512-crypt.c:442: }
1243 popq %rbx #
1244 .cfi_remember_state
1245 .cfi_def_cfa_offset 24
1246 # sha512-crypt.c:441: return __sha512_crypt_r (key, salt, buffer, buflen);
1247 movq %rbp, %rsi # salt,
1248 movq %r12, %rdi # key,
1249 # sha512-crypt.c:442: }
1250 popq %rbp #
1251 .cfi_def_cfa_offset 16
1252 popq %r12 #
1253 .cfi_def_cfa_offset 8
1254 # sha512-crypt.c:441: return __sha512_crypt_r (key, salt, buffer, buflen);
1255 jmp __sha512_crypt_r@PLT #
1256 .p2align 4,,10
1257 .p2align 3
1258 .L133:
1259 .cfi_restore_state
1260 # sha512-crypt.c:442: }
1261 popq %rbx #
1262 .cfi_def_cfa_offset 24
1263 xorl %eax, %eax #
1264 popq %rbp #
1265 .cfi_def_cfa_offset 16
1266 popq %r12 #
1267 .cfi_def_cfa_offset 8
1268 ret
1269 .cfi_endproc
1270 .LFE42:
1271 .size __sha512_crypt, .-__sha512_crypt
1272 .local buflen.5421
1273 .comm buflen.5421,4,4
1274 .section __libc_freeres_ptrs
1275 #,"aw",@progbits
1276 .align 8
1277 .type buffer, @object
1278 .size buffer, 8
1279 buffer:
1280 .zero 8
1281 .section .rodata.str1.8,"aMS",@progbits,1
1282 .align 8
1283 .type sha512_rounds_prefix, @object
1284 .size sha512_rounds_prefix, 8
1285 sha512_rounds_prefix:
1286 .string "rounds="
1287 .section .rodata.str1.1
1288 .type sha512_salt_prefix, @object
1289 .size sha512_salt_prefix, 4
1290 sha512_salt_prefix:
1291 .string "$6$"
1292 .ident "GCC: (GNU) 7.3.0"
1293 .section .note.GNU-stack,"",@progbits