2 # GNU C11 (GCC) version 7.3.0 (x86_64-nyan-linux-gnu)
3 # compiled by GNU C version 7.3.0, GMP version 6.1.2, MPFR version 4.0.1, MPC version 1.1.0, isl version none
4 # GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072
5 # options passed: -I ../include
6 # -I /root/wip/nyanglibc/builds/0/nyanglibc/build/crypt
7 # -I /root/wip/nyanglibc/builds/0/nyanglibc/build
8 # -I ../sysdeps/unix/sysv/linux/x86_64/64
9 # -I ../sysdeps/unix/sysv/linux/x86_64
10 # -I ../sysdeps/unix/sysv/linux/x86/include
11 # -I ../sysdeps/unix/sysv/linux/x86 -I ../sysdeps/x86/nptl
12 # -I ../sysdeps/unix/sysv/linux/wordsize-64 -I ../sysdeps/x86_64/nptl
13 # -I ../sysdeps/unix/sysv/linux/include -I ../sysdeps/unix/sysv/linux
14 # -I ../sysdeps/nptl -I ../sysdeps/pthread -I ../sysdeps/gnu
15 # -I ../sysdeps/unix/inet -I ../sysdeps/unix/sysv -I ../sysdeps/unix/x86_64
16 # -I ../sysdeps/unix -I ../sysdeps/posix -I ../sysdeps/x86_64/64
17 # -I ../sysdeps/x86_64/fpu -I ../sysdeps/x86/fpu -I ../sysdeps/x86_64
18 # -I ../sysdeps/x86/include -I ../sysdeps/x86
19 # -I ../sysdeps/ieee754/float128 -I ../sysdeps/ieee754/ldbl-96/include
20 # -I ../sysdeps/ieee754/ldbl-96 -I ../sysdeps/ieee754/dbl-64
21 # -I ../sysdeps/ieee754/flt-32 -I ../sysdeps/wordsize-64
22 # -I ../sysdeps/ieee754 -I ../sysdeps/generic -I .. -I ../libio -I .
23 # -MD /run/asm/crypt/crypt_util.v.d -MF /run/asm/crypt/crypt_util.o.dt -MP
24 # -MT /run/asm/crypt/.o -D _LIBC_REENTRANT -D MODULE_NAME=libcrypt -D PIC
25 # -D TOP_NAMESPACE=glibc
26 # -include /root/wip/nyanglibc/builds/0/nyanglibc/build/libc-modules.h
27 # -include ../include/libc-symbols.h crypt_util.c -mtune=generic
28 # -march=x86-64 -auxbase-strip /run/asm/crypt/crypt_util.v.s -O2 -Wall
29 # -Wwrite-strings -Wundef -Werror -Wstrict-prototypes
30 # -Wold-style-definition -std=gnu11 -fverbose-asm -fgnu89-inline
31 # -fmerge-all-constants -frounding-math -fno-stack-protector -fmath-errno
32 # -fpie -ftls-model=initial-exec
33 # options enabled: -faggressive-loop-optimizations -falign-labels
34 # -fasynchronous-unwind-tables -fauto-inc-dec -fbranch-count-reg
35 # -fcaller-saves -fchkp-check-incomplete-type -fchkp-check-read
36 # -fchkp-check-write -fchkp-instrument-calls -fchkp-narrow-bounds
37 # -fchkp-optimize -fchkp-store-bounds -fchkp-use-static-bounds
38 # -fchkp-use-static-const-bounds -fchkp-use-wrappers -fcode-hoisting
39 # -fcombine-stack-adjustments -fcommon -fcompare-elim -fcprop-registers
40 # -fcrossjumping -fcse-follow-jumps -fdefer-pop
41 # -fdelete-null-pointer-checks -fdevirtualize -fdevirtualize-speculatively
42 # -fdwarf2-cfi-asm -fearly-inlining -feliminate-unused-debug-types
43 # -fexpensive-optimizations -fforward-propagate -ffp-int-builtin-inexact
44 # -ffunction-cse -fgcse -fgcse-lm -fgnu-runtime -fgnu-unique
45 # -fguess-branch-probability -fhoist-adjacent-loads -fident -fif-conversion
46 # -fif-conversion2 -findirect-inlining -finline -finline-atomics
47 # -finline-functions-called-once -finline-small-functions -fipa-bit-cp
48 # -fipa-cp -fipa-icf -fipa-icf-functions -fipa-icf-variables -fipa-profile
49 # -fipa-pure-const -fipa-ra -fipa-reference -fipa-sra -fipa-vrp
50 # -fira-hoist-pressure -fira-share-save-slots -fira-share-spill-slots
51 # -fisolate-erroneous-paths-dereference -fivopts -fkeep-static-consts
52 # -fleading-underscore -flifetime-dse -flra-remat -flto-odr-type-merging
53 # -fmath-errno -fmerge-all-constants -fmerge-debug-strings
54 # -fmove-loop-invariants -fomit-frame-pointer -foptimize-sibling-calls
55 # -foptimize-strlen -fpartial-inlining -fpeephole -fpeephole2 -fpic -fpie
56 # -fplt -fprefetch-loop-arrays -free -freg-struct-return -freorder-blocks
57 # -freorder-functions -frerun-cse-after-loop -frounding-math
58 # -fsched-critical-path-heuristic -fsched-dep-count-heuristic
59 # -fsched-group-heuristic -fsched-interblock -fsched-last-insn-heuristic
60 # -fsched-rank-heuristic -fsched-spec -fsched-spec-insn-heuristic
61 # -fsched-stalled-insns-dep -fschedule-fusion -fschedule-insns2
62 # -fsemantic-interposition -fshow-column -fshrink-wrap
63 # -fshrink-wrap-separate -fsigned-zeros -fsplit-ivs-in-unroller
64 # -fsplit-wide-types -fssa-backprop -fssa-phiopt -fstdarg-opt
65 # -fstore-merging -fstrict-aliasing -fstrict-overflow
66 # -fstrict-volatile-bitfields -fsync-libcalls -fthread-jumps
67 # -ftoplevel-reorder -ftrapping-math -ftree-bit-ccp -ftree-builtin-call-dce
68 # -ftree-ccp -ftree-ch -ftree-coalesce-vars -ftree-copy-prop -ftree-cselim
69 # -ftree-dce -ftree-dominator-opts -ftree-dse -ftree-forwprop -ftree-fre
70 # -ftree-loop-if-convert -ftree-loop-im -ftree-loop-ivcanon
71 # -ftree-loop-optimize -ftree-parallelize-loops= -ftree-phiprop -ftree-pre
72 # -ftree-pta -ftree-reassoc -ftree-scev-cprop -ftree-sink -ftree-slsr
73 # -ftree-sra -ftree-switch-conversion -ftree-tail-merge -ftree-ter
74 # -ftree-vrp -funit-at-a-time -funwind-tables -fverbose-asm
75 # -fzero-initialized-in-bss -m128bit-long-double -m64 -m80387
76 # -malign-stringops -mavx256-split-unaligned-load
77 # -mavx256-split-unaligned-store -mfancy-math-387 -mfp-ret-in-387 -mfxsr
78 # -mglibc -mieee-fp -mlong-double-80 -mmmx -mno-sse4 -mpush-args -mred-zone
79 # -msse -msse2 -mstv -mtls-direct-seg-refs -mvzeroupper
84 .type __init_des_r, @function
89 .cfi_def_cfa_offset 16
92 .cfi_def_cfa_offset 24
94 # crypt_util.c:346: sb[0] = (long64*)__data->sb0; sb[1] = (long64*)__data->sb1;
95 leaq
32896(%rdi
), %rax
#, tmp400
98 .cfi_def_cfa_offset 32
101 .cfi_def_cfa_offset 40
104 .cfi_def_cfa_offset 48
107 .cfi_def_cfa_offset 56
109 # crypt_util.c:346: sb[0] = (long64*)__data->sb0; sb[1] = (long64*)__data->sb1;
110 leaq
128(%rdi
), %rbp
#, _1
111 # crypt_util.c:331: {
113 .cfi_def_cfa_offset 400
114 # crypt_util.c:346: sb[0] = (long64*)__data->sb0; sb[1] = (long64*)__data->sb1;
115 movq
%rax
, 56(%rsp
) # tmp400, sb
116 # crypt_util.c:347: sb[2] = (long64*)__data->sb2; sb[3] = (long64*)__data->sb3;
117 leaq
65664(%rdi
), %rax
#, tmp401
118 # crypt_util.c:331: {
119 movq
%rdi
, 40(%rsp
) # __data, %sfp
120 # crypt_util.c:346: sb[0] = (long64*)__data->sb0; sb[1] = (long64*)__data->sb1;
121 movq
%rbp
, 48(%rsp
) # _1, sb
122 # crypt_util.c:347: sb[2] = (long64*)__data->sb2; sb[3] = (long64*)__data->sb3;
123 movq
%rax
, 64(%rsp
) # tmp401, sb
124 leaq
98432(%rdi
), %rax
#, tmp402
125 movq
%rax
, 72(%rsp
) # tmp402, sb
126 # crypt_util.c:350: if(small_tables_initialized == 0) {
127 movl small_tables_initialized.7488
(%rip
), %eax
# small_tables_initialized, small_tables_initialized.0_5
128 testl
%eax
, %eax
# small_tables_initialized.0_5
130 # crypt_util.c:352: __libc_lock_lock (_ufc_tables_lock);
131 cmpq $
0, __pthread_mutex_lock@GOTPCREL
(%rip
) #,
133 # crypt_util.c:352: __libc_lock_lock (_ufc_tables_lock);
134 leaq _ufc_tables_lock
(%rip
), %rdi
#,
135 call __pthread_mutex_lock@PLT
#
137 # crypt_util.c:353: if(small_tables_initialized)
138 movl small_tables_initialized.7488
(%rip
), %eax
# small_tables_initialized, small_tables_initialized.2_6
139 leaq eperm32tab
(%rip
), %rbx
#, tmp698
140 testl
%eax
, %eax
# small_tables_initialized.2_6
143 # crypt_util.c:460: __libc_lock_unlock(_ufc_tables_lock);
144 cmpq $
0, __pthread_mutex_unlock@GOTPCREL
(%rip
) #,
146 # crypt_util.c:460: __libc_lock_unlock(_ufc_tables_lock);
147 leaq _ufc_tables_lock
(%rip
), %rdi
#,
148 call __pthread_mutex_unlock@PLT
#
150 # crypt_util.c:481: _ufc_clearmem(__data->sb0,
151 movl $
131072, %edx
#,
153 movq
%rbp
, %rdi
# _1,
155 leaq sbox
(%rip
), %r12 #, tmp694
156 leaq
48(%rsp
), %rax
#, tmp835
157 movl $
24, %r13d
#, ivtmp.80
158 movl $
0, 28(%rsp
) #, %sfp
159 movl $
15, 24(%rsp
) #, %sfp
160 movl
%r13d
, %ecx
# ivtmp.80, ivtmp.80
161 movq
%rax
, 32(%rsp
) # tmp835, %sfp
162 movq
%r12, 16(%rsp
) # tmp694, %sfp
163 movl $
14, %eax
#, pretmp_510
165 movslq
28(%rsp
), %rdx
# %sfp,
166 # crypt_util.c:519: sb[sg][inx] =
167 movq
32(%rsp
), %rdi
# %sfp, ivtmp.74
168 # crypt_util.c:497: for(j1 = 0; j1 < 64; j1++) {
169 movl $
0, 4(%rsp
) #, %sfp
170 # crypt_util.c:519: sb[sg][inx] =
171 movq
(%rdi
), %r14 # MEM[base: _454, offset: 0B], _78
172 leal
1(%rdx
), %r15d
#, _451
173 leaq
0(,%rdx
,4), %rdi
#, tmp712
174 movslq
%r15d
, %r15 # _451, _451
175 movq
%rdi
, 8(%rsp
) # tmp712, %sfp
176 salq $
2, %r15 #, tmp707
180 movl
4(%rsp
), %ebp
# %sfp, _465
181 # crypt_util.c:503: to_permute = (((ufc_long)s1 << 4) |
182 movl
24(%rsp
), %edx
# %sfp, pretmp_524
183 movslq
%eax
, %r11 # pretmp_510, pretmp_510
185 # crypt_util.c:499: for(j2 = 0; j2 < 64; j2++) {
187 sall $
6, %ebp
#, _465
188 movslq
%edx
, %rax
# pretmp_524,
193 movl
%r9d
, %eax
# j2, tmp680
194 movl
%r9d
, %esi
# j2, tmp686
196 andl $
1, %esi
#, tmp686
197 movl
%eax
, %edx
# tmp680, tmp682
198 movl
%r9d
, %eax
# j2, tmp684
199 sarl $
4, %eax
#, tmp684
200 andl $
15, %edx
#, tmp682
201 andl $
2, %eax
#, tmp685
202 orl
%esi
, %eax
# tmp686, tmp687
204 addq
%r15, %rax
# tmp707, tmp690
205 salq $
4, %rax
#, tmp691
206 addq
%rdx
, %rax
# tmp682, tmp692
207 movslq
(%r12,%rax
,4), %rax
# sbox,
209 # crypt_util.c:503: to_permute = (((ufc_long)s1 << 4) |
210 orq
%r11, %rax
# _64, tmp626
211 # crypt_util.c:518: inx = ((j1 << 6) | j2);
212 movl
%r9d
, %esi
# j2, tmp630
213 # crypt_util.c:499: for(j2 = 0; j2 < 64; j2++) {
215 # crypt_util.c:503: to_permute = (((ufc_long)s1 << 4) |
216 salq
%cl
, %rax
# ivtmp.80, to_permute
217 # crypt_util.c:518: inx = ((j1 << 6) | j2);
218 orl
%ebp
, %esi
# _465, tmp630
219 # crypt_util.c:523: ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
220 movq
%rax
, %rdx
# to_permute, tmp628
221 # crypt_util.c:520: ((long64)eperm32tab[0][(to_permute >> 24) & 0xff][0] << 32) |
222 movq
%rax
, %r10 # to_permute, _74
223 # crypt_util.c:526: ((long64)eperm32tab[2][(to_permute >> 8) & 0xff][0] << 32) |
224 movzbl
%ah
, %edi
# to_permute, _90
225 # crypt_util.c:523: ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
226 shrq $
16, %rdx
#, tmp628
227 # crypt_util.c:520: ((long64)eperm32tab[0][(to_permute >> 24) & 0xff][0] << 32) |
228 shrq $
20, %r10 #, _74
229 # crypt_util.c:529: ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) |
230 movzbl
%al
, %eax
# to_permute, _96
231 # crypt_util.c:523: ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
232 movzbl
%dl
, %edx
# tmp628, _83
233 # crypt_util.c:521: (long64)eperm32tab[0][(to_permute >> 24) & 0xff][1];
234 andl $
4080, %r10d
#, tmp633
235 # crypt_util.c:518: inx = ((j1 << 6) | j2);
236 movslq
%esi
, %rsi
# tmp630, inx
237 # crypt_util.c:524: (long64)eperm32tab[1][(to_permute >> 16) & 0xff][1];
238 movq
%rdx
, %r13 # _83, tmp637
239 # crypt_util.c:521: (long64)eperm32tab[0][(to_permute >> 24) & 0xff][1];
240 addq
%rbx
, %r10 # tmp698, tmp634
241 # crypt_util.c:523: ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
242 addq $
256, %rdx
#, tmp666
243 # crypt_util.c:524: (long64)eperm32tab[1][(to_permute >> 16) & 0xff][1];
244 salq $
4, %r13 #, tmp637
245 # crypt_util.c:528: sb[sg][inx] |=
246 movq
8(%r10), %r8 # eperm32tab, tmp641
247 # crypt_util.c:523: ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
248 salq $
4, %rdx
#, tmp667
249 # crypt_util.c:528: sb[sg][inx] |=
250 orq
4104(%rbx
,%r13), %r8 # eperm32tab, tmp640
251 # crypt_util.c:530: (long64)eperm32tab[3][(to_permute) & 0xff][1];
252 movq
%rax
, %r13 # _96, tmp643
253 # crypt_util.c:529: ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) |
254 addq $
768, %rax
#, tmp653
255 # crypt_util.c:530: (long64)eperm32tab[3][(to_permute) & 0xff][1];
256 salq $
4, %r13 #, tmp643
257 # crypt_util.c:529: ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) |
258 salq $
4, %rax
#, tmp654
259 # crypt_util.c:528: sb[sg][inx] |=
260 orq
12296(%rbx
,%r13), %r8 # eperm32tab, tmp646
261 # crypt_util.c:527: (long64)eperm32tab[2][(to_permute >> 8) & 0xff][1];
262 movq
%rdi
, %r13 # _90, tmp648
263 # crypt_util.c:529: ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) |
264 movq
(%rbx
,%rax
), %rax
# eperm32tab, tmp657
265 # crypt_util.c:527: (long64)eperm32tab[2][(to_permute >> 8) & 0xff][1];
266 salq $
4, %r13 #, tmp648
267 # crypt_util.c:528: sb[sg][inx] |=
268 orq
8200(%rbx
,%r13), %r8 # eperm32tab, tmp651
269 # crypt_util.c:529: ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) |
270 salq $
32, %rax
#, tmp656
271 # crypt_util.c:528: sb[sg][inx] |=
272 orq
%rax
, %r8 # tmp656, tmp658
273 # crypt_util.c:520: ((long64)eperm32tab[0][(to_permute >> 24) & 0xff][0] << 32) |
274 movq
(%r10), %rax
# eperm32tab, tmp663
275 salq $
32, %rax
#, tmp662
276 # crypt_util.c:528: sb[sg][inx] |=
277 orq
%rax
, %r8 # tmp662, tmp664
278 # crypt_util.c:523: ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
279 movq
(%rbx
,%rdx
), %rax
# eperm32tab, tmp670
280 salq $
32, %rax
#, tmp669
281 # crypt_util.c:528: sb[sg][inx] |=
282 orq
%rax
, %r8 # tmp669, tmp671
283 # crypt_util.c:526: ((long64)eperm32tab[2][(to_permute >> 8) & 0xff][0] << 32) |
284 leaq
512(%rdi
), %rax
#, tmp673
285 salq $
4, %rax
#, tmp674
286 movq
(%rbx
,%rax
), %rax
# eperm32tab, tmp677
287 salq $
32, %rax
#, tmp676
288 # crypt_util.c:528: sb[sg][inx] |=
289 orq
%rax
, %r8 # tmp676, tmp678
290 # crypt_util.c:499: for(j2 = 0; j2 < 64; j2++) {
292 # crypt_util.c:528: sb[sg][inx] |=
293 movq
%r8, (%r14,%rsi
,8) # tmp678, *_80
294 # crypt_util.c:499: for(j2 = 0; j2 < 64; j2++) {
296 # crypt_util.c:497: for(j1 = 0; j1 < 64; j1++) {
297 addl $
1, 4(%rsp
) #, %sfp
298 movl
4(%rsp
), %eax
# %sfp, j1
301 movl
4(%rsp
), %edi
# %sfp, j1
302 movl
%edi
, %eax
# j1, tmp611
304 movl
%eax
, %edx
# tmp611, tmp613
305 movl
%edi
, %eax
# j1, tmp615
306 andl $
1, %edi
#, tmp617
307 sarl $
4, %eax
#, tmp615
308 andl $
15, %edx
#, tmp613
309 andl $
2, %eax
#, tmp616
310 orl
%edi
, %eax
# tmp617, tmp618
312 addq
8(%rsp
), %rax
# %sfp, tmp621
313 salq $
4, %rax
#, tmp622
314 addq
%rdx
, %rax
# tmp613, tmp623
315 movl
(%r12,%rax
,4), %eax
# sbox, pretmp_510
318 subl $
8, %ecx
#, ivtmp.80
319 addq $
512, 16(%rsp
) #, %sfp
320 addq $
8, 32(%rsp
) #, %sfp
321 movq
16(%rsp
), %rax
# %sfp, ivtmp.78
322 addl $
2, 28(%rsp
) #, %sfp
323 # crypt_util.c:493: for(sg = 0; sg < 4; sg++) {
324 cmpl $
-8, %ecx
#, ivtmp.80
326 movl
256(%rax
), %edi
# MEM[base: _450, offset: 256B], prephitmp_487
327 movl
(%rax
), %eax
# MEM[base: _450, offset: 0B], pretmp_510
328 movl
%edi
, 24(%rsp
) # prephitmp_487, %sfp
331 # crypt_util.c:537: __data->current_salt[0] = 0;
332 movq
40(%rsp
), %rax
# %sfp, __data
333 # crypt_util.c:536: __data->current_saltbits = 0;
335 # crypt_util.c:537: __data->current_salt[0] = 0;
336 movq $
0, 131214(%rax
) #, MEM[(void *)__data_129(D) + 131214B]
337 # crypt_util.c:536: __data->current_saltbits = 0;
338 movw
%dx
, 131222(%rax
) #, MEM[(long int *)__data_129(D) + 131222B]
339 # crypt_util.c:539: __data->initialized++;
340 addl $
1, 131228(%rax
) #, *__data_129(D).initialized
341 # crypt_util.c:540: }
344 .cfi_def_cfa_offset 56
346 .cfi_def_cfa_offset 48
348 .cfi_def_cfa_offset 40
350 .cfi_def_cfa_offset 32
352 .cfi_def_cfa_offset 24
354 .cfi_def_cfa_offset 16
356 .cfi_def_cfa_offset 8
360 # crypt_util.c:362: _ufc_clearmem((char*)do_pc1, (int)sizeof(do_pc1));
361 leaq do_pc1
(%rip
), %rdi
#,
364 leaq bytemask
(%rip
), %r14 #, tmp697
365 leaq longmask
(%rip
), %r13 #, tmp699
367 leaq pc1
(%rip
), %r9 #, tmp701
368 leaq do_pc1
(%rip
), %r8 #, tmp704
369 xorl
%esi
, %esi
# ivtmp.157
370 # crypt_util.c:366: mask2 = longmask[bit % 28 + 4];
371 movl $
613566757, %edi
#, tmp426
373 # crypt_util.c:364: comes_from_bit = pc1[bit] - 1;
374 movl
(%r9,%rsi
,4), %r11d
# MEM[symbol: pc1, index: _384, offset: 0B], MEM[symbol: pc1, index: _384, offset: 0B]
375 # crypt_util.c:366: mask2 = longmask[bit % 28 + 4];
376 movl
%esi
, %r15d
# ivtmp.157, tmp428
377 # crypt_util.c:364: comes_from_bit = pc1[bit] - 1;
378 leal
-1(%r11), %ebx
#, comes_from_bit
379 # crypt_util.c:365: mask1 = bytemask[comes_from_bit % 8 + 1];
380 movl
%ebx
, %edx
# comes_from_bit, tmp415
381 sarl $
31, %edx
#, tmp415
382 shrl $
29, %edx
#, tmp416
383 leal
(%rbx
,%rdx
), %eax
#, tmp417
384 andl $
7, %eax
#, tmp418
385 subl
%edx
, %eax
# tmp416, tmp419
386 # crypt_util.c:366: mask2 = longmask[bit % 28 + 4];
387 movl
%esi
, %edx
# ivtmp.157, tmp424
388 # crypt_util.c:365: mask1 = bytemask[comes_from_bit % 8 + 1];
389 addl $
1, %eax
#, tmp420
390 # crypt_util.c:366: mask2 = longmask[bit % 28 + 4];
391 shrl $
2, %edx
#, tmp424
392 # crypt_util.c:365: mask1 = bytemask[comes_from_bit % 8 + 1];
394 movzbl
(%r14,%rax
), %ecx
# bytemask, mask1
395 # crypt_util.c:366: mask2 = longmask[bit % 28 + 4];
396 movl
%edx
, %eax
# tmp424, tmp424
398 imull $
28, %edx
, %eax
#, tmp425, tmp427
399 movslq
%edx
, %rdx
# tmp425, tmp455
400 subl
%eax
, %r15d
# tmp427, tmp428
401 movl
%r15d
, %eax
# tmp428, tmp428
402 addl $
4, %eax
#, tmp437
403 # crypt_util.c:369: do_pc1[comes_from_bit / 8][bit / 28][j] |= mask2;
404 testl
%ebx
, %ebx
# comes_from_bit
405 # crypt_util.c:366: mask2 = longmask[bit % 28 + 4];
407 movq
0(%r13,%rax
,8), %r10 # longmask, mask2
408 # crypt_util.c:369: do_pc1[comes_from_bit / 8][bit / 28][j] |= mask2;
409 leal
6(%r11), %eax
#, tmp441
410 cmovns
%ebx
, %eax
# tmp441,, comes_from_bit, comes_from_bit
411 sarl $
3, %eax
#, tmp442
413 leaq
(%rdx
,%rax
,2), %rdx
#, tmp456
414 # crypt_util.c:367: for(j = 0; j < 128; j++) {
416 salq $
10, %rdx
#, tmp457
417 addq
%r8, %rdx
# tmp704, _394
421 # crypt_util.c:367: for(j = 0; j < 128; j++) {
426 # crypt_util.c:368: if(j & mask1)
427 testq
%rax
, %rcx
# j, mask1
429 # crypt_util.c:369: do_pc1[comes_from_bit / 8][bit / 28][j] |= mask2;
430 orq
%r10, (%rdx
,%rax
,8) # mask2, MEM[base: _394, index: _395, offset: 0B]
431 # crypt_util.c:367: for(j = 0; j < 128; j++) {
436 addq $
1, %rsi
#, ivtmp.157
437 # crypt_util.c:363: for(bit = 0; bit < 56; bit++) {
438 cmpq $
56, %rsi
#, ivtmp.157
440 # crypt_util.c:378: _ufc_clearmem((char*)do_pc2, (int)sizeof(do_pc2));
441 leaq do_pc2
(%rip
), %r8 #, tmp705
442 movl $
1024, %ecx
#, tmp463
443 xorl
%eax
, %eax
# tmp462
444 leaq pc2
(%rip
), %r10 #, tmp702
445 leaq BITMASK
(%rip
), %r12 #, tmp703
446 xorl
%esi
, %esi
# ivtmp.142
447 movq
%r8, %rdi
# tmp705, tmp461
448 # crypt_util.c:381: mask1 = bytemask[comes_from_bit % 7 + 1];
449 movl $
-1840700269, %r9d
#, tmp470
450 # crypt_util.c:378: _ufc_clearmem((char*)do_pc2, (int)sizeof(do_pc2));
452 # crypt_util.c:382: mask2 = BITMASK[bit % 24];
453 movl $
-1431655765, %edi
#, tmp484
455 # crypt_util.c:380: comes_from_bit = pc2[bit] - 1;
456 movl
(%r10,%rsi
,4), %eax
# MEM[symbol: pc2, index: _401, offset: 0B], tmp764
457 # crypt_util.c:382: mask2 = BITMASK[bit % 24];
458 movl
%esi
, %ebx
# ivtmp.142, tmp489
459 # crypt_util.c:380: comes_from_bit = pc2[bit] - 1;
460 leal
-1(%rax
), %r11d
#, comes_from_bit
461 # crypt_util.c:381: mask1 = bytemask[comes_from_bit % 7 + 1];
462 movl
%r11d
, %eax
# comes_from_bit, tmp751
464 movl
%r11d
, %eax
# comes_from_bit, tmp473
465 sarl $
31, %eax
#, tmp473
466 leal
(%rdx
,%r11), %ecx
#, tmp471
467 sarl $
2, %ecx
#, tmp472
468 subl
%eax
, %ecx
# tmp473, tmp468
469 leal
0(,%rcx
,8), %eax
#, tmp475
470 subl
%ecx
, %eax
# tmp468, tmp476
471 subl
%eax
, %r11d
# tmp476, comes_from_bit
472 movl
%r11d
, %eax
# comes_from_bit, tmp477
473 addl $
1, %eax
#, tmp478
475 movzbl
(%r14,%rax
), %r11d
# bytemask, mask1
476 # crypt_util.c:382: mask2 = BITMASK[bit % 24];
477 movl
%esi
, %eax
# ivtmp.142, tmp752
479 movl
%edx
, %eax
# tmp483, tmp483
480 movslq
%ecx
, %rdx
# tmp468, tmp508
481 shrl $
4, %eax
#, tmp483
482 salq $
10, %rdx
#, tmp509
483 leal
(%rax
,%rax
,2), %eax
#, tmp487
484 addq
%r8, %rdx
# tmp705, _408
485 sall $
3, %eax
#, tmp488
486 subl
%eax
, %ebx
# tmp488, tmp489
487 movslq
%ebx
, %rax
# tmp489,
488 movq
(%r12,%rax
,8), %rbx
# BITMASK, mask2
489 # crypt_util.c:383: for(j = 0; j < 128; j++) {
494 # crypt_util.c:383: for(j = 0; j < 128; j++) {
499 # crypt_util.c:384: if(j & mask1)
500 testq
%rax
, %r11 # j, mask1
502 # crypt_util.c:385: do_pc2[comes_from_bit / 7][j] |= mask2;
503 orq
%rbx
, (%rdx
,%rax
,8) # mask2, MEM[base: _408, index: _409, offset: 0B]
504 # crypt_util.c:383: for(j = 0; j < 128; j++) {
509 addq $
1, %rsi
#, ivtmp.142
510 # crypt_util.c:379: for(bit = 0; bit < 48; bit++) {
511 cmpq $
48, %rsi
#, ivtmp.142
513 # crypt_util.c:401: _ufc_clearmem((char*)eperm32tab, (int)sizeof(eperm32tab));
514 leaq eperm32tab
(%rip
), %rdi
#,
517 leaq eperm32tab
(%rip
), %rbx
#, tmp698
519 leaq perm32
(%rip
), %r9 #, tmp693
520 leaq esel
(%rip
), %rdi
#, tmp696
521 xorl
%esi
, %esi
# ivtmp.127
522 # crypt_util.c:408: eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK[bit % 24];
523 movl $
-1431655765, %r8d
#, tmp528
525 # crypt_util.c:404: comes_from = perm32[esel[bit]-1]-1;
526 movl
(%rdi
,%rsi
,4), %eax
# MEM[symbol: esel, index: _412, offset: 0B], tmp771
527 subl $
1, %eax
#, tmp520
529 movl
(%r9,%rax
,4), %r10d
# perm32, tmp523
530 subl $
1, %r10d
#, _27
531 # crypt_util.c:405: mask1 = bytemask[comes_from % 8];
532 movl
%r10d
, %eax
# _27, tmp526
533 andl $
7, %eax
#, tmp526
534 movzbl
(%r14,%rax
), %ecx
# bytemask, mask1
535 # crypt_util.c:408: eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK[bit % 24];
536 movl
%esi
, %eax
# ivtmp.127, tmp753
538 movl
%edx
, %r15d
# tmp527, tmp527
539 shrl $
4, %r15d
#, _32
540 movslq
%r15d
, %rdx
# _32,
541 movl
%esi
, %r15d
# ivtmp.127, _32
542 leal
(%rdx
,%rdx
,2), %eax
#, tmp531
543 sall $
3, %eax
#, tmp532
544 subl
%eax
, %r15d
# tmp532, _32
545 # crypt_util.c:404: comes_from = perm32[esel[bit]-1]-1;
546 movslq
%r10d
, %rax
# _27, comes_from
547 # crypt_util.c:408: eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK[bit % 24];
548 shrq $
3, %rax
#, tmp542
549 movslq
%r15d
, %r15 # _32, _32
550 salq $
9, %rax
#, tmp543
551 addq
%rdx
, %rax
# _32, tmp553
552 leaq
(%rbx
,%rax
,8), %r10 #, _421
553 # crypt_util.c:406: for(j = 256; j--;) {
558 # crypt_util.c:407: if(j & mask1)
559 testq
%rax
, %rcx
# j, mask1
561 movq
%rax
, %rdx
# j, _422
562 # crypt_util.c:408: eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK[bit % 24];
563 movq
(%r12,%r15,8), %r11 # BITMASK, tmp559
564 salq $
4, %rdx
#, _422
565 orq
%r11, (%r10,%rdx
) # tmp559, MEM[base: _421, index: _422, offset: 0B]
567 # crypt_util.c:406: for(j = 256; j--;) {
571 addq $
1, %rsi
#, ivtmp.127
572 # crypt_util.c:402: for(bit = 0; bit < 48; bit++) {
573 cmpq $
48, %rsi
#, ivtmp.127
575 movl $
47, %eax
#, ivtmp.112
577 # crypt_util.c:417: e_inverse[esel[bit] - 1 ] = bit;
578 movl
(%rdi
,%rax
,4), %edx
# MEM[symbol: esel, index: _430, offset: 0B], _36
579 leal
-1(%rdx
), %ecx
#, tmp562
580 # crypt_util.c:418: e_inverse[esel[bit] - 1 + 32] = bit + 48;
581 addl $
31, %edx
#, tmp564
582 movslq
%edx
, %rdx
# tmp564, tmp565
583 # crypt_util.c:417: e_inverse[esel[bit] - 1 ] = bit;
584 movslq
%ecx
, %rcx
# tmp562, tmp563
585 movl
%eax
, 80(%rsp
,%rcx
,4) # ivtmp.112, e_inverse
586 # crypt_util.c:418: e_inverse[esel[bit] - 1 + 32] = bit + 48;
587 leal
48(%rax
), %ecx
#, tmp566
588 subq $
1, %rax
#, ivtmp.112
589 # crypt_util.c:416: for(bit=48; bit--;) {
590 cmpq $
-1, %rax
#, ivtmp.112
591 # crypt_util.c:418: e_inverse[esel[bit] - 1 + 32] = bit + 48;
592 movl
%ecx
, 80(%rsp
,%rdx
,4) # tmp566, e_inverse
593 # crypt_util.c:416: for(bit=48; bit--;) {
595 # crypt_util.c:425: _ufc_clearmem((char*)efp, (int)sizeof efp);
596 leaq efp
(%rip
), %rdi
#,
600 leaq final_perm
(%rip
), %r11 #, tmp700
601 leaq efp
(%rip
), %r10 #, tmp695
602 xorl
%r8d
, %r8d
# ivtmp.101
603 # crypt_util.c:446: bit_within_word = comes_from_e_bit % 6; /* 0..5 */
604 movl $
715827883, %r9d
#, tmp580
606 # crypt_util.c:443: comes_from_f_bit = final_perm[bit] - 1; /* 0..63 */
607 movl
(%r11,%r8,4), %eax
# MEM[symbol: final_perm, index: _433, offset: 0B], tmp777
608 subl $
1, %eax
#, comes_from_f_bit
609 # crypt_util.c:444: comes_from_e_bit = e_inverse[comes_from_f_bit]; /* 0..95 */
611 movl
80(%rsp
,%rax
,4), %ecx
# e_inverse, comes_from_e_bit
612 # crypt_util.c:446: bit_within_word = comes_from_e_bit % 6; /* 0..5 */
613 movl
%ecx
, %eax
# comes_from_e_bit, tmp754
615 movl
%ecx
, %eax
# comes_from_e_bit, tmp581
616 sarl $
31, %eax
#, tmp581
617 subl
%eax
, %edx
# tmp581, tmp579
618 movslq
%edx
, %rax
# tmp579,
619 leal
(%rax
,%rax
,2), %edx
#, tmp584
620 salq $
7, %rax
#, tmp597
621 addl
%edx
, %edx
# tmp585
622 subl
%edx
, %ecx
# tmp585, comes_from_e_bit
623 movl
%ecx
, %edx
# comes_from_e_bit, bit_within_word
624 # crypt_util.c:448: mask1 = longmask[bit_within_word + 26];
625 addl $
26, %edx
#, tmp587
626 movslq
%edx
, %rdx
# tmp587, tmp588
627 movq
0(%r13,%rdx
,8), %rsi
# longmask, mask1
628 # crypt_util.c:449: mask2 = longmask[o_bit];
629 movq
%r8, %rdx
# ivtmp.101, o_bit
630 andl $
31, %edx
#, o_bit
631 movq
0(%r13,%rdx
,8), %rdi
# longmask, mask2
632 # crypt_util.c:433: o_long = bit / 32; /* 0..1 */
633 movl
%r8d
, %edx
# ivtmp.101, o_long
634 sarl $
5, %edx
#, o_long
635 movslq
%edx
, %rdx
# o_long, o_long
636 addq
%rdx
, %rax
# o_long, tmp600
637 leaq
(%r10,%rax
,8), %rcx
#, _443
638 # crypt_util.c:451: for(word_value = 64; word_value--;) {
639 movl $
63, %eax
#, word_value
643 # crypt_util.c:452: if(word_value & mask1)
644 testq
%rax
, %rsi
# word_value, mask1
646 movq
%rax
, %rdx
# word_value, _444
647 salq $
4, %rdx
#, _444
648 # crypt_util.c:453: efp[comes_from_word][word_value][o_long] |= mask2;
649 orq
%rdi
, (%rcx
,%rdx
) # mask2, MEM[base: _443, index: _444, offset: 0B]
651 # crypt_util.c:451: for(word_value = 64; word_value--;) {
652 subq $
1, %rax
#, word_value
653 cmpq $
-1, %rax
#, word_value
655 addq $
1, %r8 #, ivtmp.101
656 # crypt_util.c:426: for(bit = 0; bit < 64; bit++) {
657 cmpq $
64, %r8 #, ivtmp.101
659 # crypt_util.c:456: atomic_write_barrier ();
660 # crypt_util.c:457: small_tables_initialized = 1;
661 movl $
1, small_tables_initialized.7488
(%rip
) #, small_tables_initialized
664 # crypt_util.c:463: atomic_read_barrier ();
665 leaq eperm32tab
(%rip
), %rbx
#, tmp698
669 .size __init_des_r, .-__init_des_r
672 .type __init_des, @function
676 # crypt_util.c:545: __init_des_r(&_ufc_foobar);
677 leaq _ufc_foobar
(%rip
), %rdi
#,
681 .size __init_des, .-__init_des
683 .globl _ufc_setup_salt_r
684 .type _ufc_setup_salt_r, @function
689 .cfi_def_cfa_offset 16
691 movq
%rdi
, %rbx
# s, s
693 .cfi_def_cfa_offset 32
694 # crypt_util.c:612: if(__data->initialized == 0)
695 movl
131228(%rsi
), %eax
# *__data_46(D).initialized,
699 # crypt_util.c:615: s0 = s[0];
700 movzbl
(%rbx
), %edx
# *s_48(D), s0
701 # crypt_util.c:587: switch (c)
706 leal
-46(%rdx
), %eax
#, tmp185
707 cmpb $
11, %al
#, tmp185
710 # crypt_util.c:617: return false;
711 xorl
%eax
, %eax
# <retval>
713 # crypt_util.c:663: }
716 .cfi_def_cfa_offset 16
718 .cfi_def_cfa_offset 8
724 # crypt_util.c:587: switch (c)
725 leal
-97(%rdx
), %eax
#, tmp186
726 cmpb $
25, %al
#, tmp186
729 # crypt_util.c:619: s1 = s[1];
730 movzbl
1(%rbx
), %edi
# MEM[(const char *)s_48(D) + 1B], pretmp_108
731 # crypt_util.c:587: switch (c)
732 cmpb $
90, %dil
#, pretmp_108
734 cmpb $
65, %dil
#, pretmp_108
736 leal
-46(%rdi
), %eax
#, tmp187
737 cmpb $
11, %al
#, tmp187
740 # crypt_util.c:623: if(s0 == __data->current_salt[0] && s1 == __data->current_salt[1])
741 cmpb
%dl
, 131214(%rsi
) # s0, *__data_46(D).current_salt
744 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
746 # crypt_util.c:626: __data->current_salt[0] = s0;
747 movb
%dl
, 131214(%rsi
) # s0, *__data_46(D).current_salt
748 # crypt_util.c:627: __data->current_salt[1] = s1;
749 movb
%dil
, 131215(%rsi
) # pretmp_108, *__data_46(D).current_salt
750 movsbl
%dl
, %eax
# s0, _153
751 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
755 subl $
46, %eax
#, tmp189
756 movslq
%eax
, %rdx
# tmp189, iftmp.7_146
758 # crypt_util.c:639: saltbits |= BITMASK[6 * i + j];
759 leaq BITMASK
(%rip
), %r8 #, tmp240
760 # crypt_util.c:634: saltbits = 0;
761 xorl
%ecx
, %ecx
# saltbits
762 # crypt_util.c:637: for(j = 0; j < 6; j++) {
767 # crypt_util.c:638: if((c >> j) & 0x1)
768 btq
%rax
, %rdx
# j, iftmp.7_146
770 # crypt_util.c:639: saltbits |= BITMASK[6 * i + j];
771 orq
(%r8,%rax
,8), %rcx
# MEM[symbol: BITMASK, index: _155, offset: 0B], saltbits
773 # crypt_util.c:637: for(j = 0; j < 6; j++) {
777 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
778 cmpb $
96, %dil
#, pretmp_108
779 movsbl
%dil
, %eax
# pretmp_108, _176
781 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
782 subl $
59, %eax
#, tmp197
783 movslq
%eax
, %rdx
# tmp197, iftmp.7_42
785 # crypt_util.c:639: saltbits |= BITMASK[6 * i + j];
786 leaq BITMASK
(%rip
), %rdi
#, tmp239
787 # crypt_util.c:637: for(j = 0; j < 6; j++) {
792 # crypt_util.c:638: if((c >> j) & 0x1)
793 btq
%rax
, %rdx
# j, iftmp.7_42
795 # crypt_util.c:639: saltbits |= BITMASK[6 * i + j];
796 orq
48(%rdi
,%rax
,8), %rcx
# MEM[symbol: BITMASK, index: _154, offset: 0B], saltbits
798 # crypt_util.c:637: for(j = 0; j < 6; j++) {
802 # crypt_util.c:655: shuffle_sb((LONGG)__data->sb0, __data->current_saltbits ^ saltbits);
803 movq
131216(%rsi
), %r10 # *__data_46(D).current_saltbits, _21
804 leaq
128(%rsi
), %rax
#, k
805 leaq
32896(%rsi
), %r9 #, k
806 xorq
%rcx
, %r10 # saltbits, _21
810 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
811 movq
(%rax
), %r8 # MEM[base: k_113, offset: 0B], _95
812 # crypt_util.c:575: *k++ ^= (x << 32) | x;
814 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
815 movq
%r8, %rdx
# _95, tmp206
816 shrq $
32, %rdx
#, tmp206
817 xorq
%r8, %rdx
# _95, tmp207
818 andq
%r10, %rdx
# _21, x
819 # crypt_util.c:575: *k++ ^= (x << 32) | x;
820 movq
%rdx
, %rdi
# x, tmp208
821 salq $
32, %rdi
#, tmp208
822 orq
%rdi
, %rdx
# tmp208, tmp209
823 xorq
%r8, %rdx
# _95, tmp210
824 # crypt_util.c:573: for(j=4096; j--;) {
825 cmpq
%r9, %rax
# k, k
826 # crypt_util.c:575: *k++ ^= (x << 32) | x;
827 movq
%rdx
, -8(%rax
) # tmp210, MEM[base: k_99, offset: -8B]
828 # crypt_util.c:573: for(j=4096; j--;) {
830 # crypt_util.c:656: shuffle_sb((LONGG)__data->sb1, __data->current_saltbits ^ saltbits);
831 movq
131216(%rsi
), %r10 # *__data_46(D).current_saltbits, _25
832 leaq
65664(%rsi
), %r9 #, k
833 xorq
%rcx
, %r10 # saltbits, _25
837 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
838 movq
(%rax
), %r8 # MEM[base: k_115, offset: 0B], _84
839 # crypt_util.c:575: *k++ ^= (x << 32) | x;
841 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
842 movq
%r8, %rdx
# _84, tmp211
843 shrq $
32, %rdx
#, tmp211
844 xorq
%r8, %rdx
# _84, tmp212
845 andq
%r10, %rdx
# _25, x
846 # crypt_util.c:575: *k++ ^= (x << 32) | x;
847 movq
%rdx
, %rdi
# x, tmp213
848 salq $
32, %rdi
#, tmp213
849 orq
%rdi
, %rdx
# tmp213, tmp214
850 xorq
%r8, %rdx
# _84, tmp215
851 # crypt_util.c:573: for(j=4096; j--;) {
852 cmpq
%r9, %rax
# k, k
853 # crypt_util.c:575: *k++ ^= (x << 32) | x;
854 movq
%rdx
, -8(%rax
) # tmp215, MEM[base: k_88, offset: -8B]
855 # crypt_util.c:573: for(j=4096; j--;) {
857 # crypt_util.c:657: shuffle_sb((LONGG)__data->sb2, __data->current_saltbits ^ saltbits);
858 movq
131216(%rsi
), %r10 # *__data_46(D).current_saltbits, _29
859 leaq
98432(%rsi
), %r9 #, k
860 xorq
%rcx
, %r10 # saltbits, _29
864 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
865 movq
(%rax
), %r8 # MEM[base: k_118, offset: 0B], _73
866 # crypt_util.c:575: *k++ ^= (x << 32) | x;
868 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
869 movq
%r8, %rdx
# _73, tmp216
870 shrq $
32, %rdx
#, tmp216
871 xorq
%r8, %rdx
# _73, tmp217
872 andq
%r10, %rdx
# _29, x
873 # crypt_util.c:575: *k++ ^= (x << 32) | x;
874 movq
%rdx
, %rdi
# x, tmp218
875 salq $
32, %rdi
#, tmp218
876 orq
%rdi
, %rdx
# tmp218, tmp219
877 xorq
%r8, %rdx
# _73, tmp220
878 # crypt_util.c:573: for(j=4096; j--;) {
879 cmpq
%r9, %rax
# k, k
880 # crypt_util.c:575: *k++ ^= (x << 32) | x;
881 movq
%rdx
, -8(%rax
) # tmp220, MEM[base: k_77, offset: -8B]
882 # crypt_util.c:573: for(j=4096; j--;) {
884 # crypt_util.c:658: shuffle_sb((LONGG)__data->sb3, __data->current_saltbits ^ saltbits);
885 movq
131216(%rsi
), %r10 # *__data_46(D).current_saltbits, _33
886 leaq
131200(%rsi
), %r9 #, _159
887 xorq
%rcx
, %r10 # saltbits, _33
891 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
892 movq
(%rax
), %r8 # MEM[base: k_135, offset: 0B], _62
893 # crypt_util.c:575: *k++ ^= (x << 32) | x;
895 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
896 movq
%r8, %rdx
# _62, tmp221
897 shrq $
32, %rdx
#, tmp221
898 xorq
%r8, %rdx
# _62, tmp222
899 andq
%r10, %rdx
# _33, x
900 # crypt_util.c:575: *k++ ^= (x << 32) | x;
901 movq
%rdx
, %rdi
# x, tmp223
902 salq $
32, %rdi
#, tmp223
903 orq
%rdi
, %rdx
# tmp223, tmp224
904 xorq
%r8, %rdx
# _62, tmp225
905 # crypt_util.c:573: for(j=4096; j--;) {
906 cmpq
%r9, %rax
# _159, k
907 # crypt_util.c:575: *k++ ^= (x << 32) | x;
908 movq
%rdx
, -8(%rax
) # tmp225, MEM[base: k_66, offset: -8B]
909 # crypt_util.c:573: for(j=4096; j--;) {
911 # crypt_util.c:660: __data->current_saltbits = saltbits;
912 movq
%rcx
, 131216(%rsi
) # saltbits, *__data_46(D).current_saltbits
913 # crypt_util.c:663: }
916 .cfi_def_cfa_offset 16
917 # crypt_util.c:662: return true;
918 movl $
1, %eax
#, <retval>
919 # crypt_util.c:663: }
921 .cfi_def_cfa_offset 8
925 # crypt_util.c:587: switch (c)
926 leal
-97(%rdi
), %eax
#, tmp188
927 cmpb $
25, %al
#, tmp188
933 # crypt_util.c:613: __init_des_r(__data);
934 movq
%rsi
, %rdi
# __data,
935 movq
%rsi
, 8(%rsp
) # __data, %sfp
937 movq
8(%rsp
), %rsi
# %sfp, __data
940 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
941 subl $
59, %eax
#, tmp191
942 movslq
%eax
, %rdx
# tmp191, iftmp.7_146
945 # crypt_util.c:623: if(s0 == __data->current_salt[0] && s1 == __data->current_salt[1])
946 cmpb
%dil
, 131215(%rsi
) # pretmp_108, *__data_46(D).current_salt
947 # crypt_util.c:624: return true;
948 movl $
1, %eax
#, <retval>
949 # crypt_util.c:623: if(s0 == __data->current_salt[0] && s1 == __data->current_salt[1])
953 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
954 cmpb $
64, %dil
#, pretmp_108
956 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
957 subl $
53, %eax
#, tmp198
958 movslq
%eax
, %rdx
# tmp198, iftmp.7_42
961 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
962 subl $
53, %eax
#, tmp190
963 movslq
%eax
, %rdx
# tmp190, iftmp.7_146
966 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
967 subl $
46, %eax
#, tmp199
968 movslq
%eax
, %rdx
# tmp199, iftmp.7_42
972 .size _ufc_setup_salt_r, .-_ufc_setup_salt_r
#-----------------------------------------------------------------------
# _ufc_mk_keytab_r — GCC 7.3.0 -fverbose-asm output for crypt_util.c
# (UFC-crypt DES key-schedule builder).  Do NOT hand-edit; regenerate
# from crypt_util.c.
# Visible contract (from the interleaved source comments):
#   %rdi = key bytes (8 consumed), %rsi = keytab destination / __data
#   (direction flag stored at 131224(%rsi) on exit), return void.
# NOTE(review): the extraction dropped some physical lines — the label
# line, pushes, and every conditional branch (gaps in the embedded
# numbering, e.g. 976-979, 1019, 1118).  The instruction stream below is
# therefore not directly assemblable as-is; treat it as a listing.
#-----------------------------------------------------------------------
974 .globl _ufc_mk_keytab_r
975 .type _ufc_mk_keytab_r, @function
980 .cfi_def_cfa_offset 16
982 leaq
8(%rdi
), %r9 #, _311
984 .cfi_def_cfa_offset 24
986 # crypt_util.c:679: v1 = v2 = 0; k1 = &do_pc1[0][0][0];
987 leaq do_pc1
(%rip
), %rcx
#, k1
988 # crypt_util.c:667: {
990 .cfi_def_cfa_offset 32
993 .cfi_def_cfa_offset 40
996 .cfi_def_cfa_offset 48
999 .cfi_def_cfa_offset 56
1001 # crypt_util.c:679: v1 = v2 = 0; k1 = &do_pc1[0][0][0];
1002 xorl
%r8d
, %r8d
# v2
1003 xorl
%edx
, %edx
# v1
# -- PC1 loop (crypt_util.c:680-682): for each of the 8 key bytes, OR
#    the matching do_pc1 rows into v1/v2; %r9 = key+8 is the sentinel.
#    The back-branch after the cmpq was elided by the extraction. --
1007 # crypt_util.c:681: v1 |= k1[*key & 0x7f]; k1 += 128;
1008 movzbl
(%rdi
), %eax
# MEM[base: key_96, offset: 0B], MEM[base: key_96, offset: 0B]
1009 # crypt_util.c:682: v2 |= k1[*key++ & 0x7f]; k1 += 128;
1010 addq $
1, %rdi
#, key
1011 # crypt_util.c:681: v1 |= k1[*key & 0x7f]; k1 += 128;
1012 andl $
127, %eax
#, tmp200
1013 orq
(%rcx
,%rax
,8), %rdx
# *_4, v1
1014 # crypt_util.c:682: v2 |= k1[*key++ & 0x7f]; k1 += 128;
1015 orq
1024(%rcx
,%rax
,8), %r8 # *_6, v2
1016 addq $
2048, %rcx
#, k1
1017 # crypt_util.c:680: for(i = 8; i--;) {
1018 cmpq
%r9, %rdi
# _311, key
# -- Loop exited (jump not shown): precompute base pointers into the
#    do_pc2 table rows in callee-saved regs for the 16-round loop. --
1020 leaq
1024+do_pc2
(%rip
), %r15 #, tmp234
1021 xorl
%edi
, %edi
# ivtmp.241
1022 movl $
1, %r9d
#, pretmp_321
1023 leaq
-1024(%r15), %r14 #, tmp241
1024 leaq
2048(%r14), %r12 #, tmp235
1025 leaq
3072(%r14), %r13 #, tmp236
1026 leaq
2048(%r12), %rbp
#, tmp237
1027 leaq
3072(%rbp
), %rbx
#, tmp240
# -- Round loop (crypt_util.c:685-712, 16 iterations, ivtmp steps by 4
#    to 64): rotate the 28-bit halves v1/v2 left by rots[i], then gather
#    do_pc2 entries into the round key and store it at (%rsi,%rdi,2). --
1032 leaq rots
(%rip
), %rax
#, tmp271
1033 movl
(%rax
,%rdi
), %r9d
# MEM[symbol: rots, index: ivtmp.241_312, offset: 0B], pretmp_321
1035 # crypt_util.c:688: v1 = (v1 << rots[i]) | (v1 >> (28 - rots[i]));
1036 movl $
28, %eax
#, _9
1037 movl
%r9d
, %ecx
# pretmp_321, tmp255
1038 movq
%rdx
, %r10 # v1, _8
1039 subl
%r9d
, %eax
# pretmp_321, _9
1040 salq
%cl
, %r10 # tmp255, _8
1041 # crypt_util.c:702: v2 = (v2 << rots[i]) | (v2 >> (28 - rots[i]));
1042 movq
%r8, %r11 # v2, _29
1043 # crypt_util.c:688: v1 = (v1 << rots[i]) | (v1 >> (28 - rots[i]));
1044 movl
%eax
, %ecx
# _9, tmp257
1045 shrq
%cl
, %rdx
# tmp257, _10
1046 orq
%r10, %rdx
# _8, v1
1047 # crypt_util.c:690: v |= k1[(v1 >> 14) & 0x7f]; k1 += 128;
1048 movq
%rdx
, %r10 # v1, tmp202
1049 # crypt_util.c:689: v = k1[(v1 >> 21) & 0x7f]; k1 += 128;
1050 movq
%rdx
, %rcx
# v1, tmp205
1051 # crypt_util.c:690: v |= k1[(v1 >> 14) & 0x7f]; k1 += 128;
1052 shrq $
14, %r10 #, tmp202
1053 # crypt_util.c:689: v = k1[(v1 >> 21) & 0x7f]; k1 += 128;
1054 shrq $
21, %rcx
#, tmp205
1055 andl $
127, %ecx
#, tmp206
1056 # crypt_util.c:690: v |= k1[(v1 >> 14) & 0x7f]; k1 += 128;
1057 andl $
127, %r10d
#, tmp203
1058 movq
(%r15,%r10,8), %r10 # *_18, *_18
1059 orq
(%r14,%rcx
,8), %r10 # *_14, tmp208
1060 # crypt_util.c:692: v |= k1[(v1 ) & 0x7f]; k1 += 128;
1061 movq
%rdx
, %rcx
# v1, tmp210
1062 andl $
127, %ecx
#, tmp210
1063 orq
0(%r13,%rcx
,8), %r10 # *_27, _52
1064 # crypt_util.c:691: v |= k1[(v1 >> 7) & 0x7f]; k1 += 128;
1065 movq
%rdx
, %rcx
# v1, tmp212
1066 shrq $
7, %rcx
#, tmp212
1067 andl $
127, %ecx
#, tmp213
1068 # crypt_util.c:692: v |= k1[(v1 ) & 0x7f]; k1 += 128;
1069 orq
(%r12,%rcx
,8), %r10 # *_23, v
1070 # crypt_util.c:702: v2 = (v2 << rots[i]) | (v2 >> (28 - rots[i]));
1071 movl
%r9d
, %ecx
# pretmp_321, tmp263
1072 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1073 leaq
5120+do_pc2
(%rip
), %r9 #, tmp272
1074 # crypt_util.c:702: v2 = (v2 << rots[i]) | (v2 >> (28 - rots[i]));
1075 salq
%cl
, %r11 # tmp263, _29
1076 movl
%eax
, %ecx
# _9, tmp264
1077 shrq
%cl
, %r8 # tmp264, _30
1078 orq
%r11, %r8 # _29, v2
1079 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1080 movabsq $
140737488388096, %r11 #, tmp220
# NOTE(review): 140737488388096 == 0x0000800000008000 — the marker bits
# ORed into every round-key word per crypt_util.c:712.
1081 # crypt_util.c:703: v |= k1[(v2 >> 21) & 0x7f]; k1 += 128;
1082 movq
%r8, %rcx
# v2, tmp215
1083 # crypt_util.c:706: v |= k1[(v2 ) & 0x7f];
1084 movq
%r8, %rax
# v2, tmp218
1085 # crypt_util.c:699: v = (v << 32);
1087 # crypt_util.c:706: v |= k1[(v2 ) & 0x7f];
1088 andl $
127, %eax
#, tmp218
1089 # crypt_util.c:703: v |= k1[(v2 >> 21) & 0x7f]; k1 += 128;
1090 shrq $
21, %rcx
#, tmp215
1091 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1092 orq
(%rbx
,%rax
,8), %r11 # *_48, tmp220
1093 # crypt_util.c:703: v |= k1[(v2 >> 21) & 0x7f]; k1 += 128;
1094 andl $
127, %ecx
#, tmp216
1095 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1096 movq
0(%rbp
,%rcx
,8), %rax
# *_34, tmp220
1097 # crypt_util.c:704: v |= k1[(v2 >> 14) & 0x7f]; k1 += 128;
1098 movq
%r8, %rcx
# v2, tmp223
1099 shrq $
14, %rcx
#, tmp223
1100 andl $
127, %ecx
#, tmp224
1101 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1102 orq
%r11, %rax
# tmp220, tmp220
1103 orq
(%r9,%rcx
,8), %rax
# *_39, tmp226
1104 # crypt_util.c:705: v |= k1[(v2 >> 7) & 0x7f]; k1 += 128;
1105 movq
%r8, %rcx
# v2, tmp227
1106 shrq $
7, %rcx
#, tmp227
1107 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1108 leaq
1024(%r9), %r11 #, tmp270
1109 # crypt_util.c:705: v |= k1[(v2 >> 7) & 0x7f]; k1 += 128;
1110 andl $
127, %ecx
#, tmp228
1111 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1112 orq
(%r11,%rcx
,8), %rax
# *_44, tmp230
1113 orq
%r10, %rax
# v, tmp232
1114 movq
%rax
, (%rsi
,%rdi
,2) # tmp232, MEM[base: k2_62, index: ivtmp.241_313, step: 2, offset: 0B]
1115 addq $
4, %rdi
#, ivtmp.241
1116 # crypt_util.c:685: for(i = 0; i < 16; i++) {
1117 cmpq $
64, %rdi
#, ivtmp.241
# -- Epilogue (back-branch and register pops elided by extraction) --
1119 # crypt_util.c:717: }
1121 .cfi_def_cfa_offset 48
1122 # crypt_util.c:716: __data->direction = 0;
1123 movl $
0, 131224(%rsi
) #, *__data_61(D).direction
1124 # crypt_util.c:717: }
1126 .cfi_def_cfa_offset 40
1128 .cfi_def_cfa_offset 32
1130 .cfi_def_cfa_offset 24
1132 .cfi_def_cfa_offset 16
1134 .cfi_def_cfa_offset 8
1138 .size _ufc_mk_keytab_r, .-_ufc_mk_keytab_r
#-----------------------------------------------------------------------
# _ufc_dofinalperm_r — GCC 7.3.0 -fverbose-asm output for crypt_util.c
# (UFC-crypt final DES permutation).  Do NOT hand-edit; regenerate from
# crypt_util.c.
# Visible contract (from the interleaved source comments):
#   %rdi = res (array of 4 ufc_long; reads res[0..3], writes res[0..1])
#   %rsi = __data (reads current_saltbits at offset 131216)
# Body: undoes the salt swap on (l1,l2) and (r1,r2) with
# current_saltbits, then assembles v1/v2 by ORing rows of the efp[16]
# lookup table (16-byte stride per entry, hence the salq $4 before each
# indexed load), writing the result back to res[0]/res[1].
# NOTE(review): the extraction dropped the label, prologue push and a
# few lines (gaps 1142-1144, 1151, 1176, 1187, ...); listing only.
#-----------------------------------------------------------------------
1140 .globl _ufc_dofinalperm_r
1141 .type _ufc_dofinalperm_r, @function
1145 # crypt_util.c:729: l1 = res[0]; l2 = res[1];
1146 movq
(%rdi
), %rcx
# *res_42(D), l1
1147 movq
8(%rdi
), %r10 # MEM[(ufc_long *)res_42(D) + 8B], l2
1148 # crypt_util.c:737: v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1];
1149 leaq efp
(%rip
), %rax
#, tmp201
1150 # crypt_util.c:725: {
1152 .cfi_def_cfa_offset 16
1154 # crypt_util.c:732: x = (l1 ^ l2) & __data->current_saltbits; l1 ^= x; l2 ^= x;
1155 movq
131216(%rsi
), %rsi
# *__data_47(D).current_saltbits, _3
1156 # crypt_util.c:730: r1 = res[2]; r2 = res[3];
1157 movq
16(%rdi
), %r9 # MEM[(ufc_long *)res_42(D) + 16B], r1
1158 movq
24(%rdi
), %rdx
# MEM[(ufc_long *)res_42(D) + 24B], r2
1159 # crypt_util.c:732: x = (l1 ^ l2) & __data->current_saltbits; l1 ^= x; l2 ^= x;
1160 movq
%rcx
, %r8 # l1, tmp197
1161 xorq
%r10, %r8 # l2, tmp197
1162 andq
%rsi
, %r8 # _3, x
1163 xorq
%r8, %rcx
# x, l1
1164 xorq
%r10, %r8 # l2, l2
1165 # crypt_util.c:733: x = (r1 ^ r2) & __data->current_saltbits; r1 ^= x; r2 ^= x;
1166 movq
%r9, %r10 # r1, tmp198
1167 xorq
%rdx
, %r10 # r2, tmp198
1168 andq
%rsi
, %r10 # _3, x
1169 xorq
%r10, %r9 # x, r1
1170 xorq
%rdx
, %r10 # r2, r2
1171 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1172 movq
%r10, %r11 # r2, r2
1173 # crypt_util.c:738: v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
1174 movq
%r10, %rdx
# r2, r2
1175 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1177 # crypt_util.c:738: v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
1179 # crypt_util.c:737: v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1];
1180 andl $
63, %r11d
#, _5
1181 # crypt_util.c:738: v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
1182 andl $
63, %edx
#, _8
1183 # crypt_util.c:737: v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1];
1184 leaq
960(%r11), %rsi
#, tmp202
1185 # crypt_util.c:738: v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
1186 leaq
896(%rdx
), %rbx
#, tmp206
1188 salq $
4, %rbx
#, tmp207
1189 # crypt_util.c:737: v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1];
1190 salq $
4, %rsi
#, tmp203
1191 # crypt_util.c:738: v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
1192 movq
(%rax
,%rsi
), %rsi
# efp, tmp209
1193 orq
(%rax
,%rbx
), %rsi
# efp, v1
1194 # crypt_util.c:737: v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1];
1195 movq
%r11, %rbx
# _5, _5
1197 # crypt_util.c:738: v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
1198 movq
%rdx
, %r11 # _8, tmp215
1199 movq
15368(%rax
,%rbx
), %rdx
# efp, tmp218
1200 orq
14344(%rax
,%r11), %rdx
# efp, v2
1201 # crypt_util.c:739: v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1];
1202 movq
%r10, %r11 # r2, r2
1203 shrq $
19, %r11 #, r2
1204 # crypt_util.c:740: v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1];
1205 shrq $
25, %r10 #, r2
1206 # crypt_util.c:739: v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1];
1207 andl $
63, %r11d
#, _10
1208 # crypt_util.c:740: v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1];
1209 andl $
63, %r10d
#, _12
1210 # crypt_util.c:739: v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1];
1211 leaq
832(%r11), %rbx
#, tmp221
1212 salq $
4, %r11 #, tmp225
1213 orq
13320(%rax
,%r11), %rdx
# efp, v2
1214 # crypt_util.c:740: v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1];
1215 leaq
768(%r10), %r11 #, tmp230
1216 salq $
4, %r10 #, tmp234
1217 orq
12296(%rax
,%r10), %rdx
# efp, v2
1218 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1219 movq
%r9, %r10 # r1, r1
1220 # crypt_util.c:739: v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1];
1221 salq $
4, %rbx
#, tmp222
1222 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1224 # crypt_util.c:739: v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1];
1225 orq
(%rax
,%rbx
), %rsi
# efp, v1
1226 # crypt_util.c:740: v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1];
1227 salq $
4, %r11 #, tmp231
1228 # crypt_util.c:742: v1 |= efp[11][ r1 & 0x3f][0]; v2 |= efp[11][ r1 & 0x3f][1];
1229 andl $
63, %r10d
#, _14
1230 # crypt_util.c:740: v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1];
1231 orq
(%rax
,%r11), %rsi
# efp, v1
1232 # crypt_util.c:742: v1 |= efp[11][ r1 & 0x3f][0]; v2 |= efp[11][ r1 & 0x3f][1];
1233 leaq
704(%r10), %r11 #, tmp239
1234 salq $
4, %r11 #, tmp240
1235 salq $
4, %r10 #, tmp243
1236 orq
11272(%rax
,%r10), %rdx
# efp, v2
1237 # crypt_util.c:743: v1 |= efp[10][(r1 >>= 6) & 0x3f][0]; v2 |= efp[10][ r1 & 0x3f][1];
1238 movq
%r9, %r10 # r1, r1
1239 # crypt_util.c:742: v1 |= efp[11][ r1 & 0x3f][0]; v2 |= efp[11][ r1 & 0x3f][1];
1240 orq
(%rax
,%r11), %rsi
# efp, v1
1241 # crypt_util.c:743: v1 |= efp[10][(r1 >>= 6) & 0x3f][0]; v2 |= efp[10][ r1 & 0x3f][1];
1243 andl $
63, %r10d
#, _17
1244 leaq
640(%r10), %r11 #, tmp248
1245 salq $
4, %r10 #, tmp252
1246 orq
10248(%rax
,%r10), %rdx
# efp, v2
1247 # crypt_util.c:744: v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
1248 movq
%r9, %r10 # r1, r1
1249 # crypt_util.c:745: v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1];
1251 # crypt_util.c:744: v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
1252 shrq $
19, %r10 #, r1
1253 # crypt_util.c:743: v1 |= efp[10][(r1 >>= 6) & 0x3f][0]; v2 |= efp[10][ r1 & 0x3f][1];
1254 salq $
4, %r11 #, tmp249
1255 # crypt_util.c:745: v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1];
1256 andl $
63, %r9d
#, _21
1257 # crypt_util.c:744: v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
1258 andl $
63, %r10d
#, _19
1259 # crypt_util.c:743: v1 |= efp[10][(r1 >>= 6) & 0x3f][0]; v2 |= efp[10][ r1 & 0x3f][1];
1260 orq
(%rax
,%r11), %rsi
# efp, v1
1261 # crypt_util.c:744: v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
1262 leaq
576(%r10), %r11 #, tmp257
1263 salq $
4, %r10 #, tmp261
1264 orq
9224(%rax
,%r10), %rdx
# efp, v2
1265 # crypt_util.c:745: v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1];
1266 leaq
512(%r9), %r10 #, tmp266
1267 salq $
4, %r9 #, tmp270
1268 orq
8200(%rax
,%r9), %rdx
# efp, v2
1269 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1270 movq
%r8, %r9 # l2, l2
1271 # crypt_util.c:744: v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
1272 salq $
4, %r11 #, tmp258
1273 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1275 # crypt_util.c:744: v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
1276 orq
(%rax
,%r11), %rsi
# efp, v1
1277 # crypt_util.c:745: v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1];
1278 salq $
4, %r10 #, tmp267
1279 # crypt_util.c:747: v1 |= efp[ 7][ l2 & 0x3f][0]; v2 |= efp[ 7][ l2 & 0x3f][1];
1280 andl $
63, %r9d
#, _23
1281 # crypt_util.c:745: v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1];
1282 orq
(%rax
,%r10), %rsi
# efp, v1
1283 # crypt_util.c:747: v1 |= efp[ 7][ l2 & 0x3f][0]; v2 |= efp[ 7][ l2 & 0x3f][1];
1284 leaq
448(%r9), %r10 #, tmp275
1285 salq $
4, %r9 #, tmp279
1286 orq
7176(%rax
,%r9), %rdx
# efp, v2
1287 # crypt_util.c:748: v1 |= efp[ 6][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 6][ l2 & 0x3f][1];
1288 movq
%r8, %r9 # l2, l2
1290 # crypt_util.c:747: v1 |= efp[ 7][ l2 & 0x3f][0]; v2 |= efp[ 7][ l2 & 0x3f][1];
1291 salq $
4, %r10 #, tmp276
1292 # crypt_util.c:748: v1 |= efp[ 6][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 6][ l2 & 0x3f][1];
1293 andl $
63, %r9d
#, _26
1294 # crypt_util.c:747: v1 |= efp[ 7][ l2 & 0x3f][0]; v2 |= efp[ 7][ l2 & 0x3f][1];
1295 orq
(%rax
,%r10), %rsi
# efp, v1
1296 # crypt_util.c:748: v1 |= efp[ 6][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 6][ l2 & 0x3f][1];
1297 leaq
384(%r9), %r10 #, tmp284
1298 salq $
4, %r10 #, tmp285
1299 orq
(%rax
,%r10), %rsi
# efp, v1
1300 salq $
4, %r9 #, tmp288
1301 orq
6152(%rax
,%r9), %rdx
# efp, v2
1302 # crypt_util.c:749: v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1];
1303 movq
%r8, %r9 # l2, l2
1304 # crypt_util.c:750: v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1];
1306 # crypt_util.c:749: v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1];
1308 # crypt_util.c:750: v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1];
1309 andl $
63, %r8d
#, _30
1310 # crypt_util.c:749: v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1];
1311 andl $
63, %r9d
#, _28
1312 leaq
320(%r9), %r10 #, tmp293
1313 salq $
4, %r9 #, tmp297
1314 orq
5128(%rax
,%r9), %rdx
# efp, v2
1315 # crypt_util.c:750: v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1];
1316 leaq
256(%r8), %r9 #, tmp302
1317 salq $
4, %r8 #, tmp306
1318 orq
4104(%rax
,%r8), %rdx
# efp, v2
1319 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1320 movq
%rcx
, %r8 # l1, l1
1321 # crypt_util.c:749: v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1];
1322 salq $
4, %r10 #, tmp294
1323 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1325 # crypt_util.c:749: v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1];
1326 orq
(%rax
,%r10), %rsi
# efp, v1
1327 # crypt_util.c:750: v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1];
1328 salq $
4, %r9 #, tmp303
1329 # crypt_util.c:752: v1 |= efp[ 3][ l1 & 0x3f][0]; v2 |= efp[ 3][ l1 & 0x3f][1];
1330 andl $
63, %r8d
#, _32
1331 # crypt_util.c:750: v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1];
1332 orq
(%rax
,%r9), %rsi
# efp, v1
1333 # crypt_util.c:752: v1 |= efp[ 3][ l1 & 0x3f][0]; v2 |= efp[ 3][ l1 & 0x3f][1];
1334 leaq
192(%r8), %r9 #, tmp311
1335 salq $
4, %r8 #, tmp315
1336 orq
3080(%rax
,%r8), %rdx
# efp, v2
1337 # crypt_util.c:753: v1 |= efp[ 2][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 2][ l1 & 0x3f][1];
1338 movq
%rcx
, %r8 # l1, l1
1340 # crypt_util.c:752: v1 |= efp[ 3][ l1 & 0x3f][0]; v2 |= efp[ 3][ l1 & 0x3f][1];
1341 salq $
4, %r9 #, tmp312
1342 # crypt_util.c:753: v1 |= efp[ 2][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 2][ l1 & 0x3f][1];
1343 andl $
63, %r8d
#, _35
1344 # crypt_util.c:752: v1 |= efp[ 3][ l1 & 0x3f][0]; v2 |= efp[ 3][ l1 & 0x3f][1];
1345 orq
(%rax
,%r9), %rsi
# efp, v1
1346 # crypt_util.c:753: v1 |= efp[ 2][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 2][ l1 & 0x3f][1];
1347 leaq
128(%r8), %r9 #, tmp320
1348 salq $
4, %r8 #, tmp324
1349 salq $
4, %r9 #, tmp321
1350 orq
(%rax
,%r9), %rsi
# efp, v1
1351 orq
2056(%rax
,%r8), %rdx
# efp, v2
1352 # crypt_util.c:754: v1 |= efp[ 1][(l1 >>= 10) & 0x3f][0]; v2 |= efp[ 1][ l1 & 0x3f][1];
1353 movq
%rcx
, %r8 # l1, l1
1355 andl $
63, %r8d
#, _37
1356 leaq
64(%r8), %r9 #, tmp329
1357 salq $
4, %r8 #, tmp333
1358 orq
1032(%rax
,%r8), %rdx
# efp, v2
1359 salq $
4, %r9 #, tmp330
1360 orq
(%rax
,%r9), %rsi
# efp, v1
1361 # crypt_util.c:755: v1 |= efp[ 0][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 0][ l1 & 0x3f][1];
1362 shrq $
21, %rcx
#, _39
1363 andl $
1008, %ecx
#, tmp338
1364 addq
%rcx
, %rax
# tmp338, tmp339
1365 orq
8(%rax
), %rdx
# efp, v2
1366 orq
(%rax
), %rsi
# efp, v1
1367 # crypt_util.c:758: }
1369 .cfi_def_cfa_offset 8
1370 # crypt_util.c:757: res[0] = v1; res[1] = v2;
1371 movq
%rsi
, (%rdi
) # v1, *res_42(D)
1372 movq
%rdx
, 8(%rdi
) # v2, MEM[(ufc_long *)res_42(D) + 8B]
1373 # crypt_util.c:758: }
1377 .size _ufc_dofinalperm_r, .-_ufc_dofinalperm_r
#-----------------------------------------------------------------------
# _ufc_output_conversion_r — GCC 7.3.0 -fverbose-asm output for
# crypt_util.c.  Do NOT hand-edit; regenerate from crypt_util.c.
# Visible contract (from the interleaved source comments):
#   %rdi = v1, %rsi = v2, %rdx = salt (2 chars read),
#   %rcx = __data (crypt_3_buf written at offsets 131200..131213)
# Body: copies salt[0]/salt[1] (salt[0] duplicated if salt[1] is NUL),
# then two 5-iteration loops emit bin_to_ascii() of successive 6-bit
# groups of v1 and v2 into crypt_3_buf, a final char from s, and a
# terminating NUL — the classic 13-char crypt() result.
# NOTE(review): bin_to_ascii is inlined as `+46` (maps 0->'.') vs `+59`
# with a cmp at 37/12 selecting the ASCII range; branch instructions and
# the label were elided by the extraction (numbering gaps 1382-1383,
# 1391, 1405, ...), so the out-of-line tails at 1464-1489 appear
# detached below.
#-----------------------------------------------------------------------
1379 .globl _ufc_output_conversion_r
1380 .type _ufc_output_conversion_r, @function
1381 _ufc_output_conversion_r
:
1384 # crypt_util.c:771: __data->crypt_3_buf[0] = salt[0];
1385 movzbl
(%rdx
), %eax
# *salt_50(D), _1
1386 # crypt_util.c:772: __data->crypt_3_buf[1] = salt[1] ? salt[1] : salt[0];
1387 movzbl
1(%rdx
), %edx
# MEM[(const char *)salt_50(D) + 1B], _2
1388 # crypt_util.c:768: {
1389 movq
%rcx
, %r8 # __data, __data
1390 # crypt_util.c:772: __data->crypt_3_buf[1] = salt[1] ? salt[1] : salt[0];
1392 # crypt_util.c:771: __data->crypt_3_buf[0] = salt[0];
1393 movb
%al
, 131200(%rcx
) # _1, *__data_51(D).crypt_3_buf
1394 # crypt_util.c:772: __data->crypt_3_buf[1] = salt[1] ? salt[1] : salt[0];
1395 cmovne
%edx
, %eax
# _1,, _2, _1
1396 leaq
131202(%rcx
), %rdx
#, ivtmp.277
1397 movb
%al
, 131201(%rcx
) # _1, *__data_51(D).crypt_3_buf
1398 movl $
26, %ecx
#, ivtmp.275
# -- First loop (crypt_util.c:774-776): shift count steps 26,20,...,2;
#    emits 5 chars from v1; loop branch elided. --
1400 # crypt_util.c:776: __data->crypt_3_buf[i + 2] = bin_to_ascii((v1 >> shf) & 0x3f);
1401 movq
%rdi
, %rax
# v1, _4
1402 shrq
%cl
, %rax
# ivtmp.275, _4
1403 andl $
63, %eax
#, _5
1404 cmpq $
37, %rax
#, _5
1406 # crypt_util.c:776: __data->crypt_3_buf[i + 2] = bin_to_ascii((v1 >> shf) & 0x3f);
1407 addl $
59, %eax
#, iftmp.17_44
1409 subl $
6, %ecx
#, ivtmp.275
1410 # crypt_util.c:776: __data->crypt_3_buf[i + 2] = bin_to_ascii((v1 >> shf) & 0x3f);
1411 movb
%al
, (%rdx
) # iftmp.17_44, MEM[base: _80, offset: 0B]
1412 addq $
1, %rdx
#, ivtmp.277
1413 # crypt_util.c:774: for(i = 0; i < 5; i++) {
1414 cmpl $
-4, %ecx
#, ivtmp.275
1416 # crypt_util.c:779: s = (v2 & 0xf) << 2;
1417 leal
0(,%rsi
,4), %r9d
#, tmp208
1418 # crypt_util.c:780: v2 = (v2 >> 2) | ((v1 & 0x3) << 30);
1419 movq
%rsi
, %rax
# v2, v2
1420 salq $
30, %rdi
#, tmp209
1422 movl
%edi
, %esi
# tmp209, tmp210
1423 leaq
131207(%r8), %rdx
#, ivtmp.267
1424 # crypt_util.c:779: s = (v2 & 0xf) << 2;
1426 # crypt_util.c:780: v2 = (v2 >> 2) | ((v1 & 0x3) << 30);
1427 orq
%rax
, %rsi
# _19, v2
1428 movl $
26, %ecx
#, ivtmp.265
# -- Second loop (crypt_util.c:782-784): same pattern over the
#    recombined v2; loop branch elided. --
1430 # crypt_util.c:784: __data->crypt_3_buf[i + 2] = bin_to_ascii((v2 >> shf) & 0x3f);
1431 movq
%rsi
, %rax
# v2, _23
1432 shrq
%cl
, %rax
# ivtmp.265, _23
1433 andl $
63, %eax
#, _24
1434 cmpq $
37, %rax
#, _24
1436 # crypt_util.c:784: __data->crypt_3_buf[i + 2] = bin_to_ascii((v2 >> shf) & 0x3f);
1437 addl $
59, %eax
#, iftmp.19_45
1439 subl $
6, %ecx
#, ivtmp.265
1440 # crypt_util.c:784: __data->crypt_3_buf[i + 2] = bin_to_ascii((v2 >> shf) & 0x3f);
1441 movb
%al
, (%rdx
) # iftmp.19_45, MEM[base: _42, offset: 0B]
1442 addq $
1, %rdx
#, ivtmp.267
1443 # crypt_util.c:782: for(i = 5; i < 10; i++) {
1444 cmpl $
-4, %ecx
#, ivtmp.265
1446 # crypt_util.c:787: __data->crypt_3_buf[12] = bin_to_ascii(s);
1449 # crypt_util.c:787: __data->crypt_3_buf[12] = bin_to_ascii(s);
1450 leal
53(%r9), %edx
#, tmp216
1451 leal
46(%r9), %eax
#, tmp215
1453 # crypt_util.c:788: __data->crypt_3_buf[13] = 0;
1454 movb $
0, 131213(%r8) #, *__data_51(D).crypt_3_buf
1455 # crypt_util.c:787: __data->crypt_3_buf[12] = bin_to_ascii(s);
1456 cmovl
%eax
, %edx
# tmp215,, tmp216
1457 movl
%edx
, %r9d
# tmp216, iftmp.21_46
1458 movb
%r9b
, 131212(%r8) # iftmp.21_46, *__data_51(D).crypt_3_buf
1459 # crypt_util.c:789: }
# -- Out-of-line tails reached by elided branches: the bin_to_ascii
#    "value >= 12/38" cases for the two loops and the final char. --
1464 # crypt_util.c:776: __data->crypt_3_buf[i + 2] = bin_to_ascii((v1 >> shf) & 0x3f);
1465 leal
53(%rax
), %r10d
#, tmp212
1466 leal
46(%rax
), %r9d
#, tmp211
1467 cmpq $
12, %rax
#, _5
1468 movl
%r10d
, %eax
# tmp212, tmp212
1469 cmovb
%r9d
, %eax
# tmp211,, tmp212
1474 # crypt_util.c:784: __data->crypt_3_buf[i + 2] = bin_to_ascii((v2 >> shf) & 0x3f);
1475 leal
53(%rax
), %r10d
#, tmp214
1476 leal
46(%rax
), %edi
#, tmp213
1477 cmpq $
12, %rax
#, _24
1478 movl
%r10d
, %eax
# tmp214, tmp214
1479 cmovb
%edi
, %eax
# tmp213,, tmp214
1484 # crypt_util.c:787: __data->crypt_3_buf[12] = bin_to_ascii(s);
1485 addl $
59, %r9d
#, iftmp.21_46
1486 # crypt_util.c:788: __data->crypt_3_buf[13] = 0;
1487 movb $
0, 131213(%r8) #, *__data_51(D).crypt_3_buf
1488 # crypt_util.c:787: __data->crypt_3_buf[12] = bin_to_ascii(s);
1489 movb
%r9b
, 131212(%r8) # iftmp.21_46, *__data_51(D).crypt_3_buf
1490 # crypt_util.c:789: }
1494 .size _ufc_output_conversion_r, .-_ufc_output_conversion_r
#-----------------------------------------------------------------------
# __b64_from_24bit — GCC 7.3.0 -fverbose-asm output for crypt_util.c.
# Do NOT hand-edit; regenerate from crypt_util.c.
# Visible contract (from the interleaved source comments):
#   %rdi = cp (char **, advanced as chars are written)
#   %rsi = buflen (int *, decremented per char)
#   %edx = b2, %ecx = b1, %r8d = b0, %r9d = n (max chars to emit)
# Body: w = (b2<<16)|(b1<<8)|b0, then while (n-- > 0 && *buflen > 0)
# emit b64t[w & 0x3f] through *cp and shift w right 6 bits.
# NOTE(review): the function label, the loop label, and all branch
# instructions were dropped by the extraction (numbering gaps
# 1498-1500, 1512, 1514-1515, ...); listing only.
#-----------------------------------------------------------------------
1496 .globl __b64_from_24bit
1497 .type __b64_from_24bit, @function
1501 # crypt_util.c:939: unsigned int w = (b2 << 16) | (b1 << 8) | b0;
1502 sall $
16, %edx
#, b2
1503 sall $
8, %ecx
#, tmp107
1504 orl
%r8d
, %ecx
# b0, tmp108
1505 movl
%edx
, %r8d
# b2, tmp109
1506 # crypt_util.c:940: while (n-- > 0 && (*buflen) > 0)
1507 leal
-1(%r9), %edx
#, n
1508 # crypt_util.c:939: unsigned int w = (b2 << 16) | (b1 << 8) | b0;
1509 orl
%ecx
, %r8d
# tmp108, w
1510 # crypt_util.c:940: while (n-- > 0 && (*buflen) > 0)
1511 testl
%r9d
, %r9d
# n
1513 movl
(%rsi
), %eax
# *buflen_21(D),
1516 leaq b64t
(%rip
), %r9 #, tmp116
# -- Loop body (crypt_util.c:940-944); entry/back branches elided. --
1521 # crypt_util.c:940: while (n-- > 0 && (*buflen) > 0)
1522 testl
%eax
, %eax
# _9
1525 # crypt_util.c:942: *(*cp)++ = b64t[w & 0x3f];
1526 movq
(%rdi
), %rax
# *cp_22(D), _5
1527 # crypt_util.c:940: while (n-- > 0 && (*buflen) > 0)
1529 # crypt_util.c:942: *(*cp)++ = b64t[w & 0x3f];
1530 leaq
1(%rax
), %rcx
#, tmp110
1531 movq
%rcx
, (%rdi
) # tmp110, *cp_22(D)
1532 movl
%r8d
, %ecx
# w, tmp113
1533 # crypt_util.c:944: w >>= 6;
1535 # crypt_util.c:942: *(*cp)++ = b64t[w & 0x3f];
1536 andl $
63, %ecx
#, tmp113
1537 movzbl
(%r9,%rcx
), %ecx
# b64t, tmp114
1538 movb
%cl
, (%rax
) # tmp114, *_5
1539 # crypt_util.c:943: --(*buflen);
1540 movl
(%rsi
), %eax
# *buflen_21(D), tmp119
1542 # crypt_util.c:940: while (n-- > 0 && (*buflen) > 0)
1544 # crypt_util.c:943: --(*buflen);
1545 movl
%eax
, (%rsi
) # _9, *buflen_21(D)
1546 # crypt_util.c:940: while (n-- > 0 && (*buflen) > 0)
1549 # crypt_util.c:946: }
1553 .size __b64_from_24bit, .-__b64_from_24bit
#-----------------------------------------------------------------------
# Static data / symbol directives emitted after the code (many .quad/
# .long initializer lines were elided by the extraction — note the
# numbering gaps, e.g. 1575->1610, 1654->2237).
#-----------------------------------------------------------------------
# Guard flag for one-time small-table setup (file-static, 4 bytes).
1554 .local small_tables_initialized.7488
# Lock protecting table initialization (40 bytes — pthread mutex size;
# see the weak __pthread_mutex_lock/unlock references below).
1555 .comm small_tables_initialized.7488,4,4
1556 .local _ufc_tables_lock
1557 .comm _ufc_tables_lock,40,32
# 131232-byte shared crypt data area (matches the 1312xx offsets used
# by the functions above).
1558 .comm _ufc_foobar,131232,32
# crypt(3) base-64 alphabet; NOTE(review): its label and the trailing
# "wxyz" fragment are not visible here — presumably b64t, confirm in
# the full listing.
1564 .ascii "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuv"
# DES lookup tables built at init time (zero-filled BSS).
1569 .comm eperm32tab,16384,32
1571 .comm do_pc2,8192,32
1573 .comm do_pc1,16384,32
1575 .type longmask, @object
1610 .section .rodata.cst8,"aM",@progbits,8
1612 .type bytemask, @object
1625 .type BITMASK, @object
1653 .type final_perm, @object
1654 .size final_perm, 256
2237 .type perm32, @object
# Weak references: locking is a no-op when not linked with libpthread.
2456 .weak __pthread_mutex_unlock
2457 .weak __pthread_mutex_lock
2458 .ident "GCC: (GNU) 7.3.0"
# Mark the stack non-executable.
2459 .section .note.GNU-stack,"",@progbits