	.file "sha256-crypt.c"
# GNU C11 (GCC) version 7.3.0 (x86_64-nyan-linux-gnu)
# compiled by GNU C version 7.3.0, GMP version 6.1.2, MPFR version 4.0.1, MPC version 1.1.0, isl version none
# GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072
# options passed: -I ../include
# -I /root/wip/nyanglibc/builds/0/nyanglibc/build/crypt
# -I /root/wip/nyanglibc/builds/0/nyanglibc/build
# -I ../sysdeps/unix/sysv/linux/x86_64/64
# -I ../sysdeps/unix/sysv/linux/x86_64
# -I ../sysdeps/unix/sysv/linux/x86/include
# -I ../sysdeps/unix/sysv/linux/x86 -I ../sysdeps/x86/nptl
# -I ../sysdeps/unix/sysv/linux/wordsize-64 -I ../sysdeps/x86_64/nptl
# -I ../sysdeps/unix/sysv/linux/include -I ../sysdeps/unix/sysv/linux
# -I ../sysdeps/nptl -I ../sysdeps/pthread -I ../sysdeps/gnu
# -I ../sysdeps/unix/inet -I ../sysdeps/unix/sysv -I ../sysdeps/unix/x86_64
# -I ../sysdeps/unix -I ../sysdeps/posix -I ../sysdeps/x86_64/64
# -I ../sysdeps/x86_64/fpu -I ../sysdeps/x86/fpu -I ../sysdeps/x86_64
# -I ../sysdeps/x86/include -I ../sysdeps/x86
# -I ../sysdeps/ieee754/float128 -I ../sysdeps/ieee754/ldbl-96/include
# -I ../sysdeps/ieee754/ldbl-96 -I ../sysdeps/ieee754/dbl-64
# -I ../sysdeps/ieee754/flt-32 -I ../sysdeps/wordsize-64
# -I ../sysdeps/ieee754 -I ../sysdeps/generic -I .. -I ../libio -I .
# -MD /run/asm/crypt/sha256-crypt.v.d -MF /run/asm/crypt/sha256-crypt.o.dt
# -MP -MT /run/asm/crypt/.o -D _LIBC_REENTRANT -D MODULE_NAME=libcrypt
# -D PIC -D TOP_NAMESPACE=glibc
# -include /root/wip/nyanglibc/builds/0/nyanglibc/build/libc-modules.h
# -include ../include/libc-symbols.h sha256-crypt.c -mtune=generic
# -march=x86-64 -auxbase-strip /run/asm/crypt/sha256-crypt.v.s -O2 -Wall
# -Wwrite-strings -Wundef -Werror -Wstrict-prototypes
# -Wold-style-definition -std=gnu11 -fverbose-asm -fgnu89-inline
# -fmerge-all-constants -frounding-math -fno-stack-protector -fmath-errno
# -fpie -ftls-model=initial-exec
# options enabled: -faggressive-loop-optimizations -falign-labels
# -fasynchronous-unwind-tables -fauto-inc-dec -fbranch-count-reg
# -fcaller-saves -fchkp-check-incomplete-type -fchkp-check-read
# -fchkp-check-write -fchkp-instrument-calls -fchkp-narrow-bounds
# -fchkp-optimize -fchkp-store-bounds -fchkp-use-static-bounds
# -fchkp-use-static-const-bounds -fchkp-use-wrappers -fcode-hoisting
# -fcombine-stack-adjustments -fcommon -fcompare-elim -fcprop-registers
# -fcrossjumping -fcse-follow-jumps -fdefer-pop
# -fdelete-null-pointer-checks -fdevirtualize -fdevirtualize-speculatively
# -fdwarf2-cfi-asm -fearly-inlining -feliminate-unused-debug-types
# -fexpensive-optimizations -fforward-propagate -ffp-int-builtin-inexact
# -ffunction-cse -fgcse -fgcse-lm -fgnu-runtime -fgnu-unique
# -fguess-branch-probability -fhoist-adjacent-loads -fident -fif-conversion
# -fif-conversion2 -findirect-inlining -finline -finline-atomics
# -finline-functions-called-once -finline-small-functions -fipa-bit-cp
# -fipa-cp -fipa-icf -fipa-icf-functions -fipa-icf-variables -fipa-profile
# -fipa-pure-const -fipa-ra -fipa-reference -fipa-sra -fipa-vrp
# -fira-hoist-pressure -fira-share-save-slots -fira-share-spill-slots
# -fisolate-erroneous-paths-dereference -fivopts -fkeep-static-consts
# -fleading-underscore -flifetime-dse -flra-remat -flto-odr-type-merging
# -fmath-errno -fmerge-all-constants -fmerge-debug-strings
# -fmove-loop-invariants -fomit-frame-pointer -foptimize-sibling-calls
# -foptimize-strlen -fpartial-inlining -fpeephole -fpeephole2 -fpic -fpie
# -fplt -fprefetch-loop-arrays -free -freg-struct-return -freorder-blocks
# -freorder-functions -frerun-cse-after-loop -frounding-math
# -fsched-critical-path-heuristic -fsched-dep-count-heuristic
# -fsched-group-heuristic -fsched-interblock -fsched-last-insn-heuristic
# -fsched-rank-heuristic -fsched-spec -fsched-spec-insn-heuristic
# -fsched-stalled-insns-dep -fschedule-fusion -fschedule-insns2
# -fsemantic-interposition -fshow-column -fshrink-wrap
# -fshrink-wrap-separate -fsigned-zeros -fsplit-ivs-in-unroller
# -fsplit-wide-types -fssa-backprop -fssa-phiopt -fstdarg-opt
# -fstore-merging -fstrict-aliasing -fstrict-overflow
# -fstrict-volatile-bitfields -fsync-libcalls -fthread-jumps
# -ftoplevel-reorder -ftrapping-math -ftree-bit-ccp -ftree-builtin-call-dce
# -ftree-ccp -ftree-ch -ftree-coalesce-vars -ftree-copy-prop -ftree-cselim
# -ftree-dce -ftree-dominator-opts -ftree-dse -ftree-forwprop -ftree-fre
# -ftree-loop-if-convert -ftree-loop-im -ftree-loop-ivcanon
# -ftree-loop-optimize -ftree-parallelize-loops= -ftree-phiprop -ftree-pre
# -ftree-pta -ftree-reassoc -ftree-scev-cprop -ftree-sink -ftree-slsr
# -ftree-sra -ftree-switch-conversion -ftree-tail-merge -ftree-ter
# -ftree-vrp -funit-at-a-time -funwind-tables -fverbose-asm
# -fzero-initialized-in-bss -m128bit-long-double -m64 -m80387
# -malign-stringops -mavx256-split-unaligned-load
# -mavx256-split-unaligned-store -mfancy-math-387 -mfp-ret-in-387 -mfxsr
# -mglibc -mieee-fp -mlong-double-80 -mmmx -mno-sse4 -mpush-args -mred-zone
# -msse -msse2 -mstv -mtls-direct-seg-refs -mvzeroupper
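
# ---------------------------------------------------------------------
# Annotation: this is the -fverbose-asm output for sha256-crypt.c, the
# SHA-256 crypt ("$5$") password-hashing routine.  As a reading aid, a
# minimal C sketch of the constants the code below hard-codes (the
# macro names follow the usual sha256-crypt.c source and are assumed,
# not present in this file):
#
#   /* Input salt:  "$5$" ["rounds=" N "$"] <salt> ...
#      Output:      "$5$" ["rounds=" N "$"] <salt> "$" <43 base64 chars> */
#   #define ROUNDS_DEFAULT 5000       /* the $5000 stored below         */
#   #define ROUNDS_MIN     1000       /* clamp bounds used at .L113     */
#   #define ROUNDS_MAX     999999999
#   #define SALT_LEN_MAX   16         /* the cmpq $16 after strcspn     */
# ---------------------------------------------------------------------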
	.text
#APP
	.section __libc_freeres_ptrs, "aw", %nobits
	.previous
	.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
	.string "$"
.LC1:
	.string "%s%zu$"
#NO_APP
	.text
	.p2align 4,,15
	.globl __sha256_crypt_r
	.type __sha256_crypt_r, @function
__sha256_crypt_r:
.LFB41:
	.cfi_startproc
	pushq %rbp #
	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	movq %rsp, %rbp #,
	.cfi_def_cfa_register 6
	pushq %r15 #
	pushq %r14 #
	pushq %r13 #
	pushq %r12 #
	.cfi_offset 15, -24
	.cfi_offset 14, -32
	.cfi_offset 13, -40
	.cfi_offset 12, -48
	movq %rdi, %r12 # key, key
	pushq %rbx #
# sha256-crypt.c:125: if (strncmp (sha256_salt_prefix, salt, sizeof (sha256_salt_prefix) - 1) == 0)
	leaq sha256_salt_prefix(%rip), %rdi #,
	.cfi_offset 3, -56
# sha256-crypt.c:103: {
	movq %rsi, %rbx # salt, salt
	subq $552, %rsp #,
# sha256-crypt.c:103: {
	movq %rdx, -560(%rbp) # buffer, %sfp
# sha256-crypt.c:125: if (strncmp (sha256_salt_prefix, salt, sizeof (sha256_salt_prefix) - 1) == 0)
	movl $3, %edx #,
# sha256-crypt.c:103: {
	movl %ecx, -484(%rbp) # buflen, buflen
# sha256-crypt.c:125: if (strncmp (sha256_salt_prefix, salt, sizeof (sha256_salt_prefix) - 1) == 0)
	call strncmp@PLT #
# sha256-crypt.c:127: salt += sizeof (sha256_salt_prefix) - 1;
	leaq 3(%rbx), %rdx #, tmp628
	testl %eax, %eax # _1
# sha256-crypt.c:129: if (strncmp (salt, sha256_rounds_prefix, sizeof (sha256_rounds_prefix) - 1)
	leaq sha256_rounds_prefix(%rip), %rsi #,
# sha256-crypt.c:127: salt += sizeof (sha256_salt_prefix) - 1;
	cmovne %rbx, %rdx # tmp628,, salt, tmp628
	movq %rdx, %rdi # tmp628, salt
	movq %rdx, -512(%rbp) # salt, %sfp
# sha256-crypt.c:129: if (strncmp (salt, sha256_rounds_prefix, sizeof (sha256_rounds_prefix) - 1)
	movl $7, %edx #,
	call strncmp@PLT #
	testl %eax, %eax # _2
# sha256-crypt.c:118: bool rounds_custom = false;
	movb $0, -485(%rbp) #, %sfp
# sha256-crypt.c:117: size_t rounds = ROUNDS_DEFAULT;
	movq $5000, -528(%rbp) #, %sfp
# sha256-crypt.c:129: if (strncmp (salt, sha256_rounds_prefix, sizeof (sha256_rounds_prefix) - 1)
	je .L113 #,
.L3:
# sha256-crypt.c:143: salt_len = MIN (strcspn (salt, "$"), SALT_LEN_MAX);
	movq -512(%rbp), %rdi # %sfp,
	leaq .LC0(%rip), %rsi #,
	call strcspn@PLT #
# sha256-crypt.c:144: key_len = strlen (key);
	movq %r12, %rdi # key,
# sha256-crypt.c:143: salt_len = MIN (strcspn (salt, "$"), SALT_LEN_MAX);
	cmpq $16, %rax #, _6
	movq %rax, %r14 #, _6
	movl $16, %eax #, tmp630
	cmovnb %rax, %r14 # _6,, tmp630, _6
# sha256-crypt.c:144: key_len = strlen (key);
	call strlen@PLT #
# sha256-crypt.c:146: if ((key - (char *) 0) % __alignof__ (uint32_t) != 0)
	testb $3, %r12b #, key
# sha256-crypt.c:144: key_len = strlen (key);
	movq %rax, %r15 #, tmp286
# sha256-crypt.c:146: if ((key - (char *) 0) % __alignof__ (uint32_t) != 0)
	je .L60 #,
# sha256-crypt.c:150: if (__libc_use_alloca (alloca_used + key_len + __alignof__ (uint32_t)))
	leaq 4(%rax), %rbx #, _9
# ../sysdeps/pthread/allocalim.h:27: return (__glibc_likely (__libc_alloca_cutoff (size))
	movq %rbx, %rdi # _9,
	call __libc_alloca_cutoff@PLT #
# ../sysdeps/pthread/allocalim.h:29: || __glibc_likely (size <= PTHREAD_STACK_MIN / 4)
	testl %eax, %eax # _305
	jne .L7 #,
	cmpq $4096, %rbx #, _9
	ja .L114 #,
.L7:
# sha256-crypt.c:151: tmp = alloca_account (key_len + __alignof__ (uint32_t), alloca_used);
#APP
# 151 "sha256-crypt.c" 1
	mov %rsp, %rax # p__
# 0 "" 2
#NO_APP
	addq $30, %rbx #, tmp297
	andq $-16, %rbx #, tmp301
	subq %rbx, %rsp # tmp301,
	leaq 15(%rsp), %rdi #, tmp303
	andq $-16, %rdi #, m__
#APP
# 151 "sha256-crypt.c" 1
	sub %rsp , %rax # d__
# 0 "" 2
#NO_APP
	movq %rax, -496(%rbp) # d__, %sfp
# sha256-crypt.c:120: char *free_key = NULL;
	movq $0, -576(%rbp) #, %sfp
.L9:
# sha256-crypt.c:159: key = copied_key =
	movq %r12, %rsi # key,
# sha256-crypt.c:161: - (tmp - (char *) 0) % __alignof__ (uint32_t),
	addq $4, %rdi #, tmp307
# sha256-crypt.c:159: key = copied_key =
	movq %r15, %rdx # tmp286,
	call memcpy@PLT #
	movq %rax, %r12 #, key
	movq %rax, -584(%rbp) # key, %sfp
	jmp .L6 #
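# Annotation: the block above (.L114/.L7/.L9) re-homes `key` when the
# caller's pointer is not aligned for uint32_t access: a small key is
# copied into alloca()d stack space, a large one into malloc()d memory.
# A hedged C sketch of the quoted source lines 146-161, assuming
# glibc's __libc_use_alloca() heuristic:
#
#   if ((uintptr_t) key % __alignof__ (uint32_t) != 0)
#     {
#       char *tmp;
#       if (__libc_use_alloca (alloca_used + key_len + __alignof__ (uint32_t)))
#         tmp = alloca_account (key_len + __alignof__ (uint32_t), alloca_used);
#       else if ((free_key = tmp = malloc (key_len + __alignof__ (uint32_t))) == NULL)
#         return NULL;
#       key = copied_key
#         = memcpy (tmp + __alignof__ (uint32_t)
#                   - (uintptr_t) tmp % __alignof__ (uint32_t), key, key_len);
#     }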
	.p2align 4,,10
	.p2align 3
.L60:
# sha256-crypt.c:120: char *free_key = NULL;
	movq $0, -576(%rbp) #, %sfp
# sha256-crypt.c:119: size_t alloca_used = 0;
	movq $0, -496(%rbp) #, %sfp
# sha256-crypt.c:112: char *copied_key = NULL;
	movq $0, -584(%rbp) #, %sfp
.L6:
# sha256-crypt.c:166: if ((salt - (char *) 0) % __alignof__ (uint32_t) != 0)
	testb $3, -512(%rbp) #, %sfp
# sha256-crypt.c:113: char *copied_salt = NULL;
	movq $0, -568(%rbp) #, %sfp
# sha256-crypt.c:166: if ((salt - (char *) 0) % __alignof__ (uint32_t) != 0)
	jne .L115 #,
.L10:
# sha256-crypt.c:193: sha256_init_ctx (&ctx, nss_ctx);
	leaq -400(%rbp), %rbx #, tmp624
# sha256-crypt.c:205: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
	leaq -224(%rbp), %r13 #, tmp625
# sha256-crypt.c:193: sha256_init_ctx (&ctx, nss_ctx);
	movq %rbx, %rdi # tmp624,
	call __sha256_init_ctx@PLT #
# sha256-crypt.c:196: sha256_process_bytes (key, key_len, &ctx, nss_ctx);
	movq %rbx, %rdx # tmp624,
	movq %r15, %rsi # tmp286,
	movq %r12, %rdi # key,
	call __sha256_process_bytes@PLT #
# sha256-crypt.c:200: sha256_process_bytes (salt, salt_len, &ctx, nss_ctx);
	movq -512(%rbp), %rdi # %sfp,
	movq %rbx, %rdx # tmp624,
	movq %r14, %rsi # _6,
	call __sha256_process_bytes@PLT #
# sha256-crypt.c:205: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
	movq %r13, %rdi # tmp625,
	movq %r13, -520(%rbp) # tmp625, %sfp
	call __sha256_init_ctx@PLT #
# sha256-crypt.c:208: sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
	movq %r13, %rdx # tmp625,
	movq %r15, %rsi # tmp286,
	movq %r12, %rdi # key,
	call __sha256_process_bytes@PLT #
# sha256-crypt.c:211: sha256_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
	movq -512(%rbp), %rdi # %sfp,
	movq %r13, %rdx # tmp625,
	movq %r14, %rsi # _6,
	call __sha256_process_bytes@PLT #
# sha256-crypt.c:214: sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
	movq %r13, %rdx # tmp625,
	movq %r15, %rsi # tmp286,
	movq %r12, %rdi # key,
	call __sha256_process_bytes@PLT #
# sha256-crypt.c:218: sha256_finish_ctx (&alt_ctx, nss_alt_ctx, alt_result);
	leaq -464(%rbp), %rax #, tmp626
	movq %r13, %rdi # tmp625,
	movq %rax, %rsi # tmp626,
	movq %rax, -504(%rbp) # tmp626, %sfp
	call __sha256_finish_ctx@PLT #
# sha256-crypt.c:221: for (cnt = key_len; cnt > 32; cnt -= 32)
	cmpq $32, %r15 #, tmp286
	jbe .L17 #,
	leaq -33(%r15), %rcx #, _15
	leaq -32(%r15), %rsi #, _319
	movq %r14, -552(%rbp) # _6, %sfp
	movq -504(%rbp), %r14 # %sfp, tmp626
	movq %r12, -592(%rbp) # key, %sfp
	movq %r15, %r12 # cnt, cnt
	movq %rcx, -536(%rbp) # _15, %sfp
	andq $-32, %rcx #, tmp370
	movq %rsi, -544(%rbp) # _319, %sfp
	movq %rcx, %rax # tmp370, tmp370
	movq %rsi, %rcx # _319, _331
	subq %rax, %rcx # tmp370, _331
	movq %rcx, %r13 # _331, _331
	.p2align 4,,10
	.p2align 3
.L18:
# sha256-crypt.c:222: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	movq %rbx, %rdx # tmp624,
	movl $32, %esi #,
	movq %r14, %rdi # tmp626,
# sha256-crypt.c:221: for (cnt = key_len; cnt > 32; cnt -= 32)
	subq $32, %r12 #, cnt
# sha256-crypt.c:222: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	call __sha256_process_bytes@PLT #
# sha256-crypt.c:221: for (cnt = key_len; cnt > 32; cnt -= 32)
	cmpq %r13, %r12 # _331, cnt
	jne .L18 #,
# sha256-crypt.c:223: sha256_process_bytes (alt_result, cnt, &ctx, nss_ctx);
	movq -536(%rbp), %rax # %sfp, _15
	movq -544(%rbp), %rsi # %sfp, _319
	movq %rbx, %rdx # tmp624,
	movq -504(%rbp), %rdi # %sfp,
	movq -552(%rbp), %r14 # %sfp, _6
	movq -592(%rbp), %r12 # %sfp, key
	andq $-32, %rax #, _15
	subq %rax, %rsi # tmp376, _319
	call __sha256_process_bytes@PLT #
.L19:
# sha256-crypt.c:221: for (cnt = key_len; cnt > 32; cnt -= 32)
	movq %r15, %r13 # tmp286, cnt
	jmp .L23 #
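# Annotation: the code above builds the scheme's digest B
# (alt_result = SHA256(key || salt || key)) and feeds key_len bytes of
# it into the main context.  Reassembled from the quoted source lines
# 193-223:
#
#   sha256_init_ctx (&ctx);
#   sha256_process_bytes (key, key_len, &ctx);
#   sha256_process_bytes (salt, salt_len, &ctx);
#   sha256_init_ctx (&alt_ctx);
#   sha256_process_bytes (key, key_len, &alt_ctx);
#   sha256_process_bytes (salt, salt_len, &alt_ctx);
#   sha256_process_bytes (key, key_len, &alt_ctx);
#   sha256_finish_ctx (&alt_ctx, alt_result);
#   for (cnt = key_len; cnt > 32; cnt -= 32)
#     sha256_process_bytes (alt_result, 32, &ctx);
#   sha256_process_bytes (alt_result, cnt, &ctx);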
	.p2align 4,,10
	.p2align 3
.L117:
# sha256-crypt.c:229: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	movq -504(%rbp), %rdi # %sfp,
	movl $32, %esi #,
	call __sha256_process_bytes@PLT #
# sha256-crypt.c:227: for (cnt = key_len; cnt > 0; cnt >>= 1)
	shrq %r13 # cnt
	je .L116 #,
.L23:
# sha256-crypt.c:228: if ((cnt & 1) != 0)
	testb $1, %r13b #, cnt
# sha256-crypt.c:229: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	movq %rbx, %rdx # tmp624,
# sha256-crypt.c:228: if ((cnt & 1) != 0)
	jne .L117 #,
# sha256-crypt.c:231: sha256_process_bytes (key, key_len, &ctx, nss_ctx);
	movq %r15, %rsi # tmp286,
	movq %r12, %rdi # key,
	call __sha256_process_bytes@PLT #
# sha256-crypt.c:227: for (cnt = key_len; cnt > 0; cnt >>= 1)
	shrq %r13 # cnt
	jne .L23 #,
.L116:
# sha256-crypt.c:234: sha256_finish_ctx (&ctx, nss_ctx, alt_result);
	movq -504(%rbp), %rsi # %sfp,
	movq %rbx, %rdi # tmp624,
	call __sha256_finish_ctx@PLT #
# sha256-crypt.c:237: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
	movq -520(%rbp), %rdi # %sfp,
	call __sha256_init_ctx@PLT #
	movq %rbx, -536(%rbp) # tmp624, %sfp
	movq %r13, %rbx # cnt, cnt
	movq -520(%rbp), %r13 # %sfp, tmp625
	.p2align 4,,10
	.p2align 3
.L25:
# sha256-crypt.c:241: sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
	movq %r13, %rdx # tmp625,
	movq %r15, %rsi # tmp286,
	movq %r12, %rdi # key,
# sha256-crypt.c:240: for (cnt = 0; cnt < key_len; ++cnt)
	addq $1, %rbx #, cnt
# sha256-crypt.c:241: sha256_process_bytes (key, key_len, &alt_ctx, nss_alt_ctx);
	call __sha256_process_bytes@PLT #
# sha256-crypt.c:240: for (cnt = 0; cnt < key_len; ++cnt)
	cmpq %rbx, %r15 # cnt, tmp286
	jne .L25 #,
	movq -536(%rbp), %rbx # %sfp, tmp624
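# Annotation: .L23 above walks the bits of key_len, and .L25 hashes the
# key once per key byte to start the "P sequence".  Reassembled from
# the quoted source lines 227-241:
#
#   for (cnt = key_len; cnt > 0; cnt >>= 1)
#     if ((cnt & 1) != 0)
#       sha256_process_bytes (alt_result, 32, &ctx);  /* digest B */
#     else
#       sha256_process_bytes (key, key_len, &ctx);
#   sha256_finish_ctx (&ctx, alt_result);             /* digest A */
#   sha256_init_ctx (&alt_ctx);
#   for (cnt = 0; cnt < key_len; ++cnt)
#     sha256_process_bytes (key, key_len, &alt_ctx);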
.L57:
# sha256-crypt.c:244: sha256_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
	leaq -432(%rbp), %rax #, tmp627
	movq -520(%rbp), %rdi # %sfp,
	movq %rax, %rsi # tmp627,
	movq %rax, -544(%rbp) # tmp627, %sfp
	call __sha256_finish_ctx@PLT #
# sha256-crypt.c:247: if (__libc_use_alloca (alloca_used + key_len))
	movq -496(%rbp), %r12 # %sfp, alloca_used
	addq %r15, %r12 # tmp286, alloca_used
# ../sysdeps/pthread/allocalim.h:27: return (__glibc_likely (__libc_alloca_cutoff (size))
	movq %r12, %rdi # _18,
	call __libc_alloca_cutoff@PLT #
# ../sysdeps/pthread/allocalim.h:29: || __glibc_likely (size <= PTHREAD_STACK_MIN / 4)
	cmpq $4096, %r12 #, _18
	jbe .L26 #,
	testl %eax, %eax # _311
	je .L118 #,
.L26:
# sha256-crypt.c:248: cp = p_bytes = (char *) alloca (key_len);
	leaq 30(%r15), %rax #, tmp396
# sha256-crypt.c:121: char *free_pbytes = NULL;
	movq $0, -592(%rbp) #, %sfp
# sha256-crypt.c:248: cp = p_bytes = (char *) alloca (key_len);
	andq $-16, %rax #, tmp400
	subq %rax, %rsp # tmp400,
	leaq 15(%rsp), %rax #, tmp402
	andq $-16, %rax #, tmp402
	movq %rax, -496(%rbp) # p_bytes, %sfp
	movq %rax, -472(%rbp) # p_bytes, cp
.L28:
# sha256-crypt.c:259: for (cnt = key_len; cnt >= 32; cnt -= 32)
	cmpq $31, %r15 #, tmp286
	jbe .L63 #,
	leaq -32(%r15), %rcx #, _139
	movq -496(%rbp), %rsi # %sfp, p_bytes
	movq %rcx, %rdx # _139, tmp406
	andq $-32, %rdx #, tmp406
	leaq 32(%rsi), %rax #, ivtmp.55
	leaq 64(%rsi,%rdx), %rdx #, _26
	.p2align 4,,10
	.p2align 3
.L30:
# sha256-crypt.c:260: cp = mempcpy (cp, temp_result, 32);
	movdqa -432(%rbp), %xmm0 # MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
	movups %xmm0, -32(%rax) # MEM[(char * {ref-all})&temp_result], MEM[base: _140, offset: -32B]
	movdqa -416(%rbp), %xmm0 # MEM[(char * {ref-all})&temp_result], MEM[(char * {ref-all})&temp_result]
	movups %xmm0, -16(%rax) # MEM[(char * {ref-all})&temp_result], MEM[base: _140, offset: -32B]
	movq %rax, -472(%rbp) # ivtmp.55, cp
	addq $32, %rax #, ivtmp.55
# sha256-crypt.c:259: for (cnt = key_len; cnt >= 32; cnt -= 32)
	cmpq %rax, %rdx # ivtmp.55, _26
	jne .L30 #,
	movq -496(%rbp), %rax # %sfp, p_bytes
	andq $-32, %rcx #, tmp410
	movq %r15, %rdx # tmp286, cnt
	andl $31, %edx #, cnt
	leaq 32(%rax,%rcx), %rcx #, _309
.L29:
# sha256-crypt.c:261: memcpy (cp, temp_result, cnt);
	cmpl $8, %edx #, cnt
	movl %edx, %eax # cnt, cnt
	jnb .L31 #,
	andl $4, %edx #, cnt
	jne .L119 #,
	testl %eax, %eax # cnt
	je .L32 #,
	movq -544(%rbp), %rsi # %sfp, tmp627
	testb $2, %al #, cnt
	movzbl (%rsi), %edx #, tmp425
	movb %dl, (%rcx) # tmp425,* _309
	jne .L120 #,
.L32:
# sha256-crypt.c:264: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
	movq -520(%rbp), %rdi # %sfp,
# sha256-crypt.c:267: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
	xorl %r12d, %r12d # cnt
# sha256-crypt.c:264: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
	call __sha256_init_ctx@PLT #
# sha256-crypt.c:267: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
	movq %rbx, -536(%rbp) # tmp624, %sfp
	movq -512(%rbp), %r13 # %sfp, salt
	movq %r12, %rbx # cnt, cnt
	movq -520(%rbp), %r12 # %sfp, tmp625
	.p2align 4,,10
	.p2align 3
.L37:
# sha256-crypt.c:268: sha256_process_bytes (salt, salt_len, &alt_ctx, nss_alt_ctx);
	movq %r12, %rdx # tmp625,
	movq %r14, %rsi # _6,
	movq %r13, %rdi # salt,
	call __sha256_process_bytes@PLT #
# sha256-crypt.c:267: for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
	movzbl -464(%rbp), %edx # alt_result, tmp451
	addq $1, %rbx #, cnt
	addq $16, %rdx #, tmp452
	cmpq %rbx, %rdx # cnt, tmp452
	ja .L37 #,
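# Annotation: .L37 above hashes the salt 16 + alt_result[0] times to
# derive the "S sequence" bytes.  Per the quoted source lines 264-277:
#
#   sha256_init_ctx (&alt_ctx);
#   for (cnt = 0; cnt < 16 + alt_result[0]; ++cnt)
#     sha256_process_bytes (salt, salt_len, &alt_ctx);
#   sha256_finish_ctx (&alt_ctx, temp_result);
#   cp = s_bytes = alloca (salt_len);
#   memcpy (cp, temp_result, cnt);   /* cnt == salt_len here; GCC inlines
#                                       the copy in the code below       */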
# sha256-crypt.c:271: sha256_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
	movq -544(%rbp), %r13 # %sfp, tmp627
	movq -520(%rbp), %rdi # %sfp,
	movq -536(%rbp), %rbx # %sfp, tmp624
	movq %r13, %rsi # tmp627,
	call __sha256_finish_ctx@PLT #
# sha256-crypt.c:274: cp = s_bytes = alloca (salt_len);
	leaq 30(%r14), %rax #, tmp457
# sha256-crypt.c:277: memcpy (cp, temp_result, cnt);
	movl %r14d, %ecx # _6, _6
# sha256-crypt.c:274: cp = s_bytes = alloca (salt_len);
	andq $-16, %rax #, tmp461
	subq %rax, %rsp # tmp461,
	leaq 15(%rsp), %rax #, tmp463
	andq $-16, %rax #, tmp463
# sha256-crypt.c:277: memcpy (cp, temp_result, cnt);
	cmpl $8, %r14d #, _6
# sha256-crypt.c:274: cp = s_bytes = alloca (salt_len);
	movq %rax, %rsi # tmp463, tmp465
	movq %rax, -536(%rbp) # tmp465, %sfp
	movq %rax, -472(%rbp) # tmp465, cp
# sha256-crypt.c:277: memcpy (cp, temp_result, cnt);
	movq %r13, %rax # tmp627, tmp469
	jnb .L121 #,
.L38:
	xorl %edx, %edx # tmp475
	testb $4, %cl #, _6
	jne .L122 #,
	testb $2, %cl #, _6
	jne .L123 #,
.L42:
	andl $1, %ecx #, _6
	jne .L124 #,
.L43:
# sha256-crypt.c:297: if (cnt % 7 != 0)
	movq %r14, -552(%rbp) # _6, %sfp
# sha256-crypt.c:281: for (cnt = 0; cnt < rounds; ++cnt)
	xorl %r12d, %r12d # cnt
# sha256-crypt.c:297: if (cnt % 7 != 0)
	movq -504(%rbp), %r14 # %sfp, tmp626
	jmp .L50 #
	.p2align 4,,10
	.p2align 3
.L128:
# sha256-crypt.c:288: sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
	movq -496(%rbp), %rdi # %sfp,
	movq %r15, %rsi # tmp286,
	call __sha256_process_bytes@PLT #
.L45:
# sha256-crypt.c:293: if (cnt % 3 != 0)
	movabsq $-6148914691236517205, %rax #, tmp688
	mulq %r12 # cnt
	shrq %rdx # tmp488
	leaq (%rdx,%rdx,2), %rax #, tmp493
	cmpq %rax, %r12 # tmp493, cnt
	jne .L125 #,
.L46:
# sha256-crypt.c:297: if (cnt % 7 != 0)
	movabsq $5270498306774157605, %rax #, tmp689
	imulq %r12 # cnt
	movq %r12, %rax # cnt, tmp518
	sarq $63, %rax #, tmp518
	sarq %rdx # tmp517
	subq %rax, %rdx # tmp518, tmp514
	leaq 0(,%rdx,8), %rax #, tmp520
	subq %rdx, %rax # tmp514, tmp521
	cmpq %rax, %r12 # tmp521, cnt
	jne .L126 #,
.L47:
# sha256-crypt.c:301: if ((cnt & 1) != 0)
	testq %r13, %r13 # _27
# sha256-crypt.c:302: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	movq %rbx, %rdx # tmp624,
# sha256-crypt.c:301: if ((cnt & 1) != 0)
	je .L48 #,
# sha256-crypt.c:302: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	movl $32, %esi #,
	movq %r14, %rdi # tmp626,
	call __sha256_process_bytes@PLT #
.L49:
# sha256-crypt.c:307: sha256_finish_ctx (&ctx, nss_ctx, alt_result);
	movq %r14, %rsi # tmp626,
	movq %rbx, %rdi # tmp624,
# sha256-crypt.c:281: for (cnt = 0; cnt < rounds; ++cnt)
	addq $1, %r12 #, cnt
# sha256-crypt.c:307: sha256_finish_ctx (&ctx, nss_ctx, alt_result);
	call __sha256_finish_ctx@PLT #
# sha256-crypt.c:281: for (cnt = 0; cnt < rounds; ++cnt)
	cmpq %r12, -528(%rbp) # cnt, %sfp
	je .L127 #,
.L50:
# sha256-crypt.c:284: sha256_init_ctx (&ctx, nss_ctx);
	movq %rbx, %rdi # tmp624,
# sha256-crypt.c:287: if ((cnt & 1) != 0)
	movq %r12, %r13 # cnt, _27
# sha256-crypt.c:284: sha256_init_ctx (&ctx, nss_ctx);
	call __sha256_init_ctx@PLT #
# sha256-crypt.c:287: if ((cnt & 1) != 0)
	andl $1, %r13d #, _27
# sha256-crypt.c:288: sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
	movq %rbx, %rdx # tmp624,
# sha256-crypt.c:287: if ((cnt & 1) != 0)
	jne .L128 #,
# sha256-crypt.c:290: sha256_process_bytes (alt_result, 32, &ctx, nss_ctx);
	movl $32, %esi #,
	movq %r14, %rdi # tmp626,
	call __sha256_process_bytes@PLT #
	jmp .L45 #
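# Annotation: .L50 above is the main strengthening loop, executed
# `rounds` times (quoted source lines 281-307).  The movabs constants
# at .L45/.L46 are fixed-point reciprocals GCC uses to evaluate
# cnt % 3 and cnt % 7 without a divide instruction.  One iteration
# in C:
#
#   for (cnt = 0; cnt < rounds; ++cnt)
#     {
#       sha256_init_ctx (&ctx);
#       if ((cnt & 1) != 0)
#         sha256_process_bytes (p_bytes, key_len, &ctx);
#       else
#         sha256_process_bytes (alt_result, 32, &ctx);
#       if (cnt % 3 != 0)
#         sha256_process_bytes (s_bytes, salt_len, &ctx);
#       if (cnt % 7 != 0)
#         sha256_process_bytes (p_bytes, key_len, &ctx);
#       if ((cnt & 1) != 0)
#         sha256_process_bytes (alt_result, 32, &ctx);
#       else
#         sha256_process_bytes (p_bytes, key_len, &ctx);
#       sha256_finish_ctx (&ctx, alt_result);
#     }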
	.p2align 4,,10
	.p2align 3
.L31:
# sha256-crypt.c:261: memcpy (cp, temp_result, cnt);
	movq -432(%rbp), %rax #, tmp434
	movq %rax, (%rcx) # tmp434,* _309
	movq -544(%rbp), %rdi # %sfp, tmp627
	movl %edx, %eax # cnt, cnt
	movq -8(%rdi,%rax), %rsi #, tmp441
	movq %rsi, -8(%rcx,%rax) # tmp441,
	leaq 8(%rcx), %rsi #, tmp442
	andq $-8, %rsi #, tmp442
	subq %rsi, %rcx # tmp442, _309
	leal (%rdx,%rcx), %eax #, cnt
	subq %rcx, %rdi # _309, tmp415
	andl $-8, %eax #, cnt
	cmpl $8, %eax #, cnt
	jb .L32 #,
	andl $-8, %eax #, tmp444
	xorl %edx, %edx # tmp443
.L35:
	movl %edx, %ecx # tmp443, tmp445
	addl $8, %edx #, tmp443
	movq (%rdi,%rcx), %r8 #, tmp446
	cmpl %eax, %edx # tmp444, tmp443
	movq %r8, (%rsi,%rcx) # tmp446,
	jb .L35 #,
	jmp .L32 #
	.p2align 4,,10
	.p2align 3
.L48:
# sha256-crypt.c:304: sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
	movq -496(%rbp), %rdi # %sfp,
	movq %r15, %rsi # tmp286,
	call __sha256_process_bytes@PLT #
	jmp .L49 #
	.p2align 4,,10
	.p2align 3
.L126:
# sha256-crypt.c:298: sha256_process_bytes (p_bytes, key_len, &ctx, nss_ctx);
	movq -496(%rbp), %rdi # %sfp,
	movq %rbx, %rdx # tmp624,
	movq %r15, %rsi # tmp286,
	call __sha256_process_bytes@PLT #
	jmp .L47 #
	.p2align 4,,10
	.p2align 3
.L125:
# sha256-crypt.c:294: sha256_process_bytes (s_bytes, salt_len, &ctx, nss_ctx);
	movq -552(%rbp), %rsi # %sfp,
	movq -536(%rbp), %rdi # %sfp,
	movq %rbx, %rdx # tmp624,
	call __sha256_process_bytes@PLT #
	jmp .L46 #
	.p2align 4,,10
	.p2align 3
.L127:
# sha256-crypt.c:317: cp = __stpncpy (buffer, sha256_salt_prefix, MAX (0, buflen));
	movl -484(%rbp), %edx # buflen,
	xorl %r12d, %r12d # tmp530
	movq -560(%rbp), %rdi # %sfp,
	leaq sha256_salt_prefix(%rip), %rsi #,
	movq -552(%rbp), %r14 # %sfp, _6
	testl %edx, %edx #
	movl %r12d, %edx # tmp530, tmp529
	cmovns -484(%rbp), %edx # buflen,, tmp529
	movslq %edx, %rdx # tmp529, tmp531
	call __stpncpy@PLT #
	movq %rax, %rdi #, _33
	movq %rax, -472(%rbp) # _33, cp
# sha256-crypt.c:318: buflen -= sizeof (sha256_salt_prefix) - 1;
	movl -484(%rbp), %eax # buflen, tmp750
# sha256-crypt.c:320: if (rounds_custom)
	cmpb $0, -485(%rbp) #, %sfp
# sha256-crypt.c:318: buflen -= sizeof (sha256_salt_prefix) - 1;
	leal -3(%rax), %edx #, _37
	movl %edx, -484(%rbp) # _37, buflen
# sha256-crypt.c:320: if (rounds_custom)
	jne .L129 #,
.L51:
# sha256-crypt.c:328: cp = __stpncpy (cp, salt, MIN ((size_t) MAX (0, buflen), salt_len));
	xorl %r12d, %r12d #
	testl %edx, %edx # _37
	movq -512(%rbp), %rsi # %sfp,
	cmovs %r12d, %edx # _37,, tmp541, tmp540
	movslq %edx, %rdx # tmp540, tmp542
	cmpq %r14, %rdx # _6, tmp542
	cmova %r14, %rdx # tmp542,, _6, tmp539
	call __stpncpy@PLT #
# sha256-crypt.c:329: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
	movslq -484(%rbp), %rdx # buflen,
# sha256-crypt.c:328: cp = __stpncpy (cp, salt, MIN ((size_t) MAX (0, buflen), salt_len));
	movq %rax, -472(%rbp) # _50, cp
# sha256-crypt.c:329: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
	testl %edx, %edx # buflen.23_51
	cmovns %rdx, %r12 #,,
	cmpq %r14, %r12 # _6, tmp546
	cmova %r14, %r12 # tmp546,, _6, tmp543
	subl %r12d, %edx # tmp543, _58
# sha256-crypt.c:331: if (buflen > 0)
	testl %edx, %edx # _58
# sha256-crypt.c:329: buflen -= MIN ((size_t) MAX (0, buflen), salt_len);
	movl %edx, -484(%rbp) # _58, buflen
# sha256-crypt.c:331: if (buflen > 0)
	jle .L52 #,
# sha256-crypt.c:333: *cp++ = '$';
	leaq 1(%rax), %rdx #, tmp547
	movq %rdx, -472(%rbp) # tmp547, cp
	movb $36, (%rax) #, *_50
# sha256-crypt.c:334: --buflen;
	subl $1, -484(%rbp) #, buflen
.L52:
# sha256-crypt.c:337: __b64_from_24bit (&cp, &buflen,
	movzbl -454(%rbp), %ecx # alt_result, alt_result
	movzbl -464(%rbp), %edx # alt_result, alt_result
	leaq -484(%rbp), %r13 #, tmp551
	movzbl -444(%rbp), %r8d # alt_result,
	leaq -472(%rbp), %r12 #, tmp552
	movl $4, %r9d #,
	movq %r13, %rsi # tmp551,
	movq %r12, %rdi # tmp552,
	call __b64_from_24bit@PLT #
# sha256-crypt.c:339: __b64_from_24bit (&cp, &buflen,
	movzbl -463(%rbp), %ecx # alt_result, alt_result
	movzbl -443(%rbp), %edx # alt_result, alt_result
	movl $4, %r9d #,
	movzbl -453(%rbp), %r8d # alt_result,
	movq %r13, %rsi # tmp551,
	movq %r12, %rdi # tmp552,
	call __b64_from_24bit@PLT #
# sha256-crypt.c:341: __b64_from_24bit (&cp, &buflen,
	movzbl -442(%rbp), %ecx # alt_result, alt_result
	movzbl -452(%rbp), %edx # alt_result, alt_result
	movl $4, %r9d #,
	movzbl -462(%rbp), %r8d # alt_result,
	movq %r13, %rsi # tmp551,
	movq %r12, %rdi # tmp552,
	call __b64_from_24bit@PLT #
# sha256-crypt.c:343: __b64_from_24bit (&cp, &buflen,
	movzbl -451(%rbp), %ecx # alt_result, alt_result
	movzbl -461(%rbp), %edx # alt_result, alt_result
	movl $4, %r9d #,
	movzbl -441(%rbp), %r8d # alt_result,
	movq %r13, %rsi # tmp551,
	movq %r12, %rdi # tmp552,
	call __b64_from_24bit@PLT #
# sha256-crypt.c:345: __b64_from_24bit (&cp, &buflen,
	movzbl -460(%rbp), %ecx # alt_result, alt_result
	movzbl -440(%rbp), %edx # alt_result, alt_result
	movl $4, %r9d #,
	movzbl -450(%rbp), %r8d # alt_result,
	movq %r13, %rsi # tmp551,
	movq %r12, %rdi # tmp552,
	call __b64_from_24bit@PLT #
# sha256-crypt.c:347: __b64_from_24bit (&cp, &buflen,
	movzbl -439(%rbp), %ecx # alt_result, alt_result
	movzbl -449(%rbp), %edx # alt_result, alt_result
	movl $4, %r9d #,
	movzbl -459(%rbp), %r8d # alt_result,
	movq %r13, %rsi # tmp551,
	movq %r12, %rdi # tmp552,
	call __b64_from_24bit@PLT #
# sha256-crypt.c:349: __b64_from_24bit (&cp, &buflen,
	movzbl -448(%rbp), %ecx # alt_result, alt_result
	movzbl -458(%rbp), %edx # alt_result, alt_result
	movl $4, %r9d #,
	movzbl -438(%rbp), %r8d # alt_result,
	movq %r13, %rsi # tmp551,
	movq %r12, %rdi # tmp552,
	call __b64_from_24bit@PLT #
# sha256-crypt.c:351: __b64_from_24bit (&cp, &buflen,
	movzbl -457(%rbp), %ecx # alt_result, alt_result
	movzbl -437(%rbp), %edx # alt_result, alt_result
	movl $4, %r9d #,
	movzbl -447(%rbp), %r8d # alt_result,
	movq %r13, %rsi # tmp551,
	movq %r12, %rdi # tmp552,
	call __b64_from_24bit@PLT #
# sha256-crypt.c:353: __b64_from_24bit (&cp, &buflen,
	movzbl -436(%rbp), %ecx # alt_result, alt_result
	movzbl -446(%rbp), %edx # alt_result, alt_result
	movl $4, %r9d #,
	movzbl -456(%rbp), %r8d # alt_result,
	movq %r13, %rsi # tmp551,
	movq %r12, %rdi # tmp552,
	call __b64_from_24bit@PLT #
# sha256-crypt.c:355: __b64_from_24bit (&cp, &buflen,
	movzbl -445(%rbp), %ecx # alt_result, alt_result
	movzbl -455(%rbp), %edx # alt_result, alt_result
	movl $4, %r9d #,
	movzbl -435(%rbp), %r8d # alt_result,
	movq %r13, %rsi # tmp551,
	movq %r12, %rdi # tmp552,
	call __b64_from_24bit@PLT #
# sha256-crypt.c:357: __b64_from_24bit (&cp, &buflen,
	movzbl -433(%rbp), %ecx # alt_result, alt_result
	movzbl -434(%rbp), %r8d # alt_result,
	xorl %edx, %edx #
	movl $3, %r9d #,
	movq %r13, %rsi # tmp551,
	movq %r12, %rdi # tmp552,
	call __b64_from_24bit@PLT #
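# Annotation: the eleven calls above emit the 32-byte alt_result as 43
# crypt-base64 characters, three permuted bytes per call plus a final
# two-byte group.  The helper's prototype, as assumed from the argument
# registers used here (rdi = &cp, rsi = &buflen, edx/ecx/r8d = the
# three input bytes, r9d = output length):
#
#   extern void __b64_from_24bit (char **cp, int *buflen,
#                                 unsigned int b2, unsigned int b1,
#                                 unsigned int b0, int n);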
# sha256-crypt.c:359: if (buflen <= 0)
	movl -484(%rbp), %eax # buflen,
	testl %eax, %eax #
	jle .L130 #,
# sha256-crypt.c:365: *cp = '\0'; /* Terminate the string. */
	movq -472(%rbp), %rax # cp, cp.31_127
	movq -560(%rbp), %r12 # %sfp, <retval>
	movb $0, (%rax) #, *cp.31_127
.L54:
# sha256-crypt.c:372: __sha256_init_ctx (&ctx);
	movq %rbx, %rdi # tmp624,
	call __sha256_init_ctx@PLT #
# sha256-crypt.c:373: __sha256_finish_ctx (&ctx, alt_result);
	movq -504(%rbp), %rsi # %sfp,
	movq %rbx, %rdi # tmp624,
	call __sha256_finish_ctx@PLT #
# sha256-crypt.c:374: explicit_bzero (&ctx, sizeof (ctx));
	movl $176, %edx #,
	movl $176, %esi #,
	movq %rbx, %rdi # tmp624,
	call __explicit_bzero_chk@PLT #
# sha256-crypt.c:375: explicit_bzero (&alt_ctx, sizeof (alt_ctx));
	movq -520(%rbp), %rdi # %sfp,
	movl $176, %edx #,
	movl $176, %esi #,
	call __explicit_bzero_chk@PLT #
# sha256-crypt.c:377: explicit_bzero (temp_result, sizeof (temp_result));
	movq -544(%rbp), %rdi # %sfp,
	movl $32, %edx #,
	movl $32, %esi #,
	call __explicit_bzero_chk@PLT #
# sha256-crypt.c:378: explicit_bzero (p_bytes, key_len);
	movq -496(%rbp), %rdi # %sfp,
	movq $-1, %rdx #,
	movq %r15, %rsi # tmp286,
	call __explicit_bzero_chk@PLT #
# sha256-crypt.c:379: explicit_bzero (s_bytes, salt_len);
	movq -536(%rbp), %rdi # %sfp,
	movq $-1, %rdx #,
	movq %r14, %rsi # _6,
	call __explicit_bzero_chk@PLT #
# sha256-crypt.c:380: if (copied_key != NULL)
	movq -584(%rbp), %rax # %sfp, copied_key
	testq %rax, %rax # copied_key
	je .L55 #,
# sha256-crypt.c:381: explicit_bzero (copied_key, key_len);
	movq $-1, %rdx #,
	movq %r15, %rsi # tmp286,
	movq %rax, %rdi # copied_key,
	call __explicit_bzero_chk@PLT #
.L55:
# sha256-crypt.c:382: if (copied_salt != NULL)
	movq -568(%rbp), %rax # %sfp, copied_salt
	testq %rax, %rax # copied_salt
	je .L56 #,
# sha256-crypt.c:383: explicit_bzero (copied_salt, salt_len);
	movq $-1, %rdx #,
	movq %r14, %rsi # _6,
	movq %rax, %rdi # copied_salt,
	call __explicit_bzero_chk@PLT #
.L56:
# sha256-crypt.c:385: free (free_key);
	movq -576(%rbp), %rdi # %sfp,
	call free@PLT #
# sha256-crypt.c:386: free (free_pbytes);
	movq -592(%rbp), %rdi # %sfp,
	call free@PLT #
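# Annotation: before returning, every intermediate secret is wiped.
# explicit_bzero (compiled to __explicit_bzero_chk here) differs from
# memset in that the compiler may not dead-store-eliminate it.  Per the
# quoted source lines 372-386:
#
#   explicit_bzero (&ctx, sizeof (ctx));
#   explicit_bzero (&alt_ctx, sizeof (alt_ctx));
#   explicit_bzero (temp_result, sizeof (temp_result));
#   explicit_bzero (p_bytes, key_len);
#   explicit_bzero (s_bytes, salt_len);
#   if (copied_key != NULL)
#     explicit_bzero (copied_key, key_len);
#   if (copied_salt != NULL)
#     explicit_bzero (copied_salt, salt_len);
#   free (free_key);
#   free (free_pbytes);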
.L1:
# sha256-crypt.c:388: }
	leaq -40(%rbp), %rsp #,
	movq %r12, %rax # <retval>,
	popq %rbx #
	popq %r12 #
	popq %r13 #
	popq %r14 #
	popq %r15 #
	popq %rbp #
	.cfi_remember_state
	.cfi_def_cfa 7, 8
	ret
	.p2align 4,,10
	.p2align 3
.L122:
	.cfi_restore_state
# sha256-crypt.c:277: memcpy (cp, temp_result, cnt);
	movl (%rax), %edx #, tmp477
	testb $2, %cl #, _6
	movl %edx, (%rsi) # tmp477,* s_bytes
	movl $4, %edx #, tmp475
	je .L42 #,
	jmp .L123 #
	.p2align 4,,10
	.p2align 3
.L124:
	movzbl (%rax,%rdx), %eax #, tmp483
	movb %al, (%rsi,%rdx) # tmp483,
	jmp .L43 #
	.p2align 4,,10
	.p2align 3
.L123:
	movzwl (%rax,%rdx), %edi #, tmp480
	movw %di, (%rsi,%rdx) # tmp480,
	addq $2, %rdx #, tmp475
	andl $1, %ecx #, _6
	je .L43 #,
	jmp .L124 #
	.p2align 4,,10
	.p2align 3
.L121:
	movl %r14d, %esi # _6, tmp471
	xorl %eax, %eax # tmp470
	movq %r13, %r8 # tmp627, tmp627
	andl $-8, %esi #, tmp471
.L39:
	movl %eax, %edx # tmp470, tmp472
	movq -536(%rbp), %r10 # %sfp, tmp465
	addl $8, %eax #,
	movq (%r8,%rdx), %rdi # MEM[(void *)&temp_result], tmp473
	cmpl %esi, %eax # tmp471, tmp470
	movq %rdi, (%r10,%rdx) # tmp473, MEM[(void *)s_bytes_233]
	jb .L39 #,
	movq %r10, %rsi # tmp465, tmp465
	addq %rax, %rsi # tmp474, s_bytes
	addq -544(%rbp), %rax # %sfp, tmp469
	jmp .L38 #
	.p2align 4,,10
	.p2align 3
.L130:
# sha256-crypt.c:361: __set_errno (ERANGE);
	movq errno@gottpoff(%rip), %rax #, tmp602
# sha256-crypt.c:362: buffer = NULL;
	xorl %r12d, %r12d # <retval>
# sha256-crypt.c:361: __set_errno (ERANGE);
	movl $34, %fs:(%rax) #, errno
	jmp .L54 #
	.p2align 4,,10
	.p2align 3
.L129:
# sha256-crypt.c:322: int n = __snprintf (cp, MAX (0, buflen), "%s%zu$",
	testl %edx, %edx # _37
	movq -528(%rbp), %r8 # %sfp,
	leaq sha256_rounds_prefix(%rip), %rcx #,
	cmovs %r12d, %edx # _37,, tmp530, tmp533
	xorl %eax, %eax #
	movslq %edx, %rsi # tmp533, tmp535
	leaq .LC1(%rip), %rdx #,
	call __snprintf@PLT #
# sha256-crypt.c:325: buflen -= n;
	movl -484(%rbp), %edx # buflen, _37
# sha256-crypt.c:324: cp += n;
	movslq %eax, %rdi # n, n
	addq -472(%rbp), %rdi # cp, _33
# sha256-crypt.c:325: buflen -= n;
	subl %eax, %edx # n, _37
# sha256-crypt.c:324: cp += n;
	movq %rdi, -472(%rbp) # _33, cp
# sha256-crypt.c:325: buflen -= n;
	movl %edx, -484(%rbp) # _37, buflen
	jmp .L51 #
	.p2align 4,,10
	.p2align 3
.L113:
# sha256-crypt.c:132: const char *num = salt + sizeof (sha256_rounds_prefix) - 1;
	movq -512(%rbp), %rax # %sfp, salt
# sha256-crypt.c:134: unsigned long int srounds = strtoul (num, &endp, 10);
	leaq -224(%rbp), %rsi #, tmp281
	movl $10, %edx #,
# sha256-crypt.c:132: const char *num = salt + sizeof (sha256_rounds_prefix) - 1;
	leaq 7(%rax), %rdi #, num
# sha256-crypt.c:134: unsigned long int srounds = strtoul (num, &endp, 10);
	call strtoul@PLT #
# sha256-crypt.c:135: if (*endp == '$')
	movq -224(%rbp), %rdx # endp, endp.0_3
	cmpb $36, (%rdx) #, *endp.0_3
	jne .L3 #,
# sha256-crypt.c:138: rounds = MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX));
	cmpq $999999999, %rax #, srounds
# sha256-crypt.c:137: salt = endp + 1;
	leaq 1(%rdx), %rcx #, salt
# sha256-crypt.c:138: rounds = MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX));
	movl $999999999, %edx #, tmp283
	cmovbe %rax, %rdx # srounds,, tmp283
	movl $1000, %eax #, tmp284
# sha256-crypt.c:139: rounds_custom = true;
	movb $1, -485(%rbp) #, %sfp
	cmpq $1000, %rdx #, rounds
# sha256-crypt.c:137: salt = endp + 1;
	movq %rcx, -512(%rbp) # salt, %sfp
	cmovnb %rdx, %rax # rounds,, tmp284
	movq %rax, -528(%rbp) # tmp284, %sfp
	jmp .L3 #
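# Annotation: .L113 above parses an explicit "rounds=N$" field and
# clamps it to [ROUNDS_MIN, ROUNDS_MAX].  Reassembled from the quoted
# source lines 132-139:
#
#   const char *num = salt + sizeof (sha256_rounds_prefix) - 1;
#   char *endp;
#   unsigned long int srounds = strtoul (num, &endp, 10);
#   if (*endp == '$')
#     {
#       salt = endp + 1;
#       rounds = MAX (ROUNDS_MIN, MIN (srounds, ROUNDS_MAX));
#       rounds_custom = true;
#     }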
	.p2align 4,,10
	.p2align 3
.L115:
# sha256-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
	leaq 34(%r14), %rax #, tmp316
# sha256-crypt.c:169: alloca_used += salt_len + __alignof__ (uint32_t);
	movq -496(%rbp), %rcx # %sfp, alloca_used
# sha256-crypt.c:170: salt = copied_salt =
	movl %r14d, %edx # _6,
# sha256-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
	andq $-16, %rax #, tmp320
	subq %rax, %rsp # tmp320,
# sha256-crypt.c:169: alloca_used += salt_len + __alignof__ (uint32_t);
	leaq 4(%r14,%rcx), %rcx #, alloca_used
# sha256-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
	leaq 15(%rsp), %rax #, tmp322
# sha256-crypt.c:169: alloca_used += salt_len + __alignof__ (uint32_t);
	movq %rcx, -496(%rbp) # alloca_used, %sfp
# sha256-crypt.c:168: char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
	andq $-16, %rax #, tmp324
# sha256-crypt.c:170: salt = copied_salt =
	cmpl $8, %r14d #, _6
# sha256-crypt.c:172: - (tmp - (char *) 0) % __alignof__ (uint32_t),
	leaq 4(%rax), %rcx #, tmp325
# sha256-crypt.c:170: salt = copied_salt =
	jnb .L11 #,
	testb $4, %r14b #, _6
	jne .L131 #,
	testl %edx, %edx # _6
	je .L12 #,
	movq -512(%rbp), %rbx # %sfp, salt
	testb $2, %dl #, _6
	movzbl (%rbx), %esi #* salt, tmp338
	movb %sil, 4(%rax) # tmp338,
	jne .L132 #,
.L12:
	movq %rcx, -512(%rbp) # salt, %sfp
	movq %rcx, -568(%rbp) # salt, %sfp
	jmp .L10 #
	.p2align 4,,10
	.p2align 3
.L11:
	movq -512(%rbp), %rbx # %sfp, salt
	addq $8, %rax #, tmp355
	movq (%rbx), %rdx #* salt, tmp347
	movq %rdx, -4(%rax) # tmp347,
	movl %r14d, %edx # _6, _6
	movq -8(%rbx,%rdx), %rsi #, tmp354
	movq %rsi, -8(%rcx,%rdx) # tmp354,
	movq %rcx, %rdx # tmp325, tmp327
	subq %rax, %rdx # tmp355, tmp327
	subq %rdx, %rbx # tmp327, salt
	addl %r14d, %edx # _6, _6
	andl $-8, %edx #, _6
	movq %rbx, %r8 # salt, salt
	cmpl $8, %edx #, _6
	jb .L12 #,
	andl $-8, %edx #, tmp357
	xorl %esi, %esi # tmp356
.L15:
	movl %esi, %edi # tmp356, tmp358
	addl $8, %esi #, tmp356
	movq (%r8,%rdi), %r9 #, tmp359
	cmpl %edx, %esi # tmp357, tmp356
	movq %r9, (%rax,%rdi) # tmp359,
	jb .L15 #,
	jmp .L12 #
.L119:
# sha256-crypt.c:261: memcpy (cp, temp_result, cnt);
	movq -544(%rbp), %rsi # %sfp, tmp627
	movl (%rsi), %edx #, tmp417
	movl %edx, (%rcx) # tmp417,* _309
	movl %eax, %edx # cnt, cnt
	movl -4(%rsi,%rdx), %eax #, tmp424
	movl %eax, -4(%rcx,%rdx) # tmp424,
	jmp .L32 #
	.p2align 4,,10
	.p2align 3
.L17:
# sha256-crypt.c:223: sha256_process_bytes (alt_result, cnt, &ctx, nss_ctx);
	movq -504(%rbp), %rdi # %sfp,
	movq %rbx, %rdx # tmp624,
	movq %r15, %rsi # tmp286,
	call __sha256_process_bytes@PLT #
# sha256-crypt.c:227: for (cnt = key_len; cnt > 0; cnt >>= 1)
	testq %r15, %r15 # tmp286
	jne .L19 #,
# sha256-crypt.c:234: sha256_finish_ctx (&ctx, nss_ctx, alt_result);
	movq -504(%rbp), %rsi # %sfp,
	movq %rbx, %rdi # tmp624,
	call __sha256_finish_ctx@PLT #
# sha256-crypt.c:237: sha256_init_ctx (&alt_ctx, nss_alt_ctx);
	movq -520(%rbp), %rdi # %sfp,
	call __sha256_init_ctx@PLT #
	jmp .L57 #
	.p2align 4,,10
	.p2align 3
.L63:
# sha256-crypt.c:259: for (cnt = key_len; cnt >= 32; cnt -= 32)
	movq -496(%rbp), %rcx # %sfp, _309
	movq %r15, %rdx # tmp286, cnt
	jmp .L29 #
.L120:
# sha256-crypt.c:261: memcpy (cp, temp_result, cnt);
	movl %eax, %edx # cnt, cnt
	movq -544(%rbp), %rax # %sfp, tmp627
	movzwl -2(%rax,%rdx), %eax #, tmp433
	movw %ax, -2(%rcx,%rdx) # tmp433,
	jmp .L32 #
.L118:
# sha256-crypt.c:251: free_pbytes = cp = p_bytes = (char *)malloc (key_len);
	movq %r15, %rdi # tmp286,
	call malloc@PLT #
# sha256-crypt.c:252: if (free_pbytes == NULL)
	testq %rax, %rax # p_bytes
# sha256-crypt.c:251: free_pbytes = cp = p_bytes = (char *)malloc (key_len);
	movq %rax, -496(%rbp) # p_bytes, %sfp
	movq %rax, -472(%rbp) # p_bytes, cp
# sha256-crypt.c:252: if (free_pbytes == NULL)
	je .L27 #,
	movq %rax, -592(%rbp) # p_bytes, %sfp
	jmp .L28 #
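# Annotation: .L118 above is the heap fallback for p_bytes when the key
# is too large for alloca.  Reassembled from the quoted source lines
# 247-255:
#
#   if (__libc_use_alloca (alloca_used + key_len))
#     cp = p_bytes = (char *) alloca (key_len);
#   else
#     {
#       free_pbytes = cp = p_bytes = (char *) malloc (key_len);
#       if (free_pbytes == NULL)
#         {
#           free (free_key);
#           return NULL;
#         }
#     }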
.L131:
# sha256-crypt.c:170: salt = copied_salt =
	movq -512(%rbp), %rbx # %sfp, salt
	movl (%rbx), %esi #* salt, tmp330
	movl %esi, 4(%rax) # tmp330,
	movl -4(%rbx,%rdx), %eax #, tmp337
	movl %eax, -4(%rcx,%rdx) # tmp337,
	jmp .L12 #
.L132:
	movq -512(%rbp), %rax # %sfp, salt
	movzwl -2(%rax,%rdx), %eax #, tmp346
	movw %ax, -2(%rcx,%rdx) # tmp346,
	jmp .L12 #
.L114:
# sha256-crypt.c:154: free_key = tmp = (char *) malloc (key_len + __alignof__ (uint32_t));
	movq %rbx, %rdi # _9,
	call malloc@PLT #
# sha256-crypt.c:155: if (tmp == NULL)
	testq %rax, %rax # free_key
# sha256-crypt.c:154: free_key = tmp = (char *) malloc (key_len + __alignof__ (uint32_t));
	movq %rax, -576(%rbp) # free_key, %sfp
# sha256-crypt.c:155: if (tmp == NULL)
	je .L61 #,
	movq %rax, %rdi # free_key, m__
# sha256-crypt.c:119: size_t alloca_used = 0;
	movq $0, -496(%rbp) #, %sfp
	jmp .L9 #
.L27:
# sha256-crypt.c:254: free (free_key);
	movq -576(%rbp), %rdi # %sfp,
# sha256-crypt.c:255: return NULL;
	xorl %r12d, %r12d # <retval>
# sha256-crypt.c:254: free (free_key);
	call free@PLT #
# sha256-crypt.c:255: return NULL;
	jmp .L1 #
.L61:
# sha256-crypt.c:156: return NULL;
	xorl %r12d, %r12d # <retval>
	jmp .L1 #
	.cfi_endproc
.LFE41:
	.size __sha256_crypt_r, .-__sha256_crypt_r
	.p2align 4,,15
	.globl __sha256_crypt
	.type __sha256_crypt, @function
__sha256_crypt:
.LFB42:
	.cfi_startproc
	pushq %r12 #
	.cfi_def_cfa_offset 16
	.cfi_offset 12, -16
	pushq %rbp #
	.cfi_def_cfa_offset 24
	.cfi_offset 6, -24
	movq %rdi, %r12 # key, key
	pushq %rbx #
	.cfi_def_cfa_offset 32
	.cfi_offset 3, -32
# sha256-crypt.c:407: + strlen (salt) + 1 + 43 + 1);
	movq %rsi, %rdi # salt,
# sha256-crypt.c:399: {
	movq %rsi, %rbp # salt, salt
# sha256-crypt.c:407: + strlen (salt) + 1 + 43 + 1);
	call strlen@PLT #
# sha256-crypt.c:409: if (buflen < needed)
	movl buflen.5422(%rip), %ecx # buflen, buflen.33_4
# sha256-crypt.c:407: + strlen (salt) + 1 + 43 + 1);
	leal 66(%rax), %ebx #, needed
	movq buffer(%rip), %rdx # buffer, <retval>
# sha256-crypt.c:409: if (buflen < needed)
	cmpl %ebx, %ecx # needed, buflen.33_4
	jge .L134 #,
# sha256-crypt.c:411: char *new_buffer = (char *) realloc (buffer, needed);
	movq %rdx, %rdi # <retval>,
	movslq %ebx, %rsi # needed, needed
	call realloc@PLT #
# sha256-crypt.c:412: if (new_buffer == NULL)
	testq %rax, %rax # <retval>
# sha256-crypt.c:411: char *new_buffer = (char *) realloc (buffer, needed);
	movq %rax, %rdx #, <retval>
# sha256-crypt.c:412: if (new_buffer == NULL)
	je .L133 #,
# sha256-crypt.c:415: buffer = new_buffer;
	movq %rax, buffer(%rip) # <retval>, buffer
# sha256-crypt.c:416: buflen = needed;
	movl %ebx, buflen.5422(%rip) # needed, buflen
	movl %ebx, %ecx # needed, buflen.33_4
.L134:
# sha256-crypt.c:420: }
	popq %rbx #
	.cfi_remember_state
	.cfi_def_cfa_offset 24
# sha256-crypt.c:419: return __sha256_crypt_r (key, salt, buffer, buflen);
	movq %rbp, %rsi # salt,
	movq %r12, %rdi # key,
# sha256-crypt.c:420: }
	popq %rbp #
	.cfi_def_cfa_offset 16
	popq %r12 #
	.cfi_def_cfa_offset 8
# sha256-crypt.c:419: return __sha256_crypt_r (key, salt, buffer, buflen);
	jmp __sha256_crypt_r #
	.p2align 4,,10
	.p2align 3
.L133:
	.cfi_restore_state
# sha256-crypt.c:420: }
	popq %rbx #
	.cfi_def_cfa_offset 24
	xorl %eax, %eax #
	popq %rbp #
	.cfi_def_cfa_offset 16
	popq %r12 #
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE42:
	.size __sha256_crypt, .-__sha256_crypt
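# Annotation: __sha256_crypt is the non-reentrant wrapper around
# __sha256_crypt_r.  It keeps one process-wide output buffer (placed in
# __libc_freeres_ptrs below so __libc_freeres can release it) and grows
# it on demand; per the `leal 66(%rax)` above, 66 covers "$5$"(3) +
# "rounds="(7) + up to 10 digits + two '$' + 43 base64 chars + NUL.
# A C sketch of the quoted source lines 399-420:
#
#   static char *buffer;          /* the 8-byte object below */
#   static int buflen;            /* buflen.5422             */
#
#   char *
#   __sha256_crypt (const char *key, const char *salt)
#   {
#     int needed = 66 + strlen (salt);
#     if (buflen < needed)
#       {
#         char *new_buffer = (char *) realloc (buffer, needed);
#         if (new_buffer == NULL)
#           return NULL;
#         buffer = new_buffer;
#         buflen = needed;
#       }
#     return __sha256_crypt_r (key, salt, buffer, buflen);
#   }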
	.local buflen.5422
	.comm buflen.5422,4,4
	.section __libc_freeres_ptrs
#,"aw",@progbits
	.align 8
	.type buffer, @object
	.size buffer, 8
buffer:
	.zero 8
	.section .rodata.str1.8,"aMS",@progbits,1
	.align 8
	.type sha256_rounds_prefix, @object
	.size sha256_rounds_prefix, 8
sha256_rounds_prefix:
	.string "rounds="
	.section .rodata.str1.1
	.type sha256_salt_prefix, @object
	.size sha256_salt_prefix, 4
sha256_salt_prefix:
	.string "$5$"
	.ident "GCC: (GNU) 7.3.0"
	.section .note.GNU-stack,"",@progbits