# Capture header (gitweb residue): nyanglibc.git / crypt / crypt_util.v.s
# blob ada7f058208ddb58c5afd6584bf601968dc31d81
# Purpose of this file: GCC -fverbose-asm assembly listing of crypt_util.c.
1 .file "crypt_util.c"
2 # GNU C11 (GCC) version 7.3.0 (x86_64-nyan-linux-gnu)
3 # compiled by GNU C version 7.3.0, GMP version 6.1.2, MPFR version 4.0.1, MPC version 1.1.0, isl version none
4 # GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072
5 # options passed: -I ../include
6 # -I /root/wip/nyanglibc/builds/0/nyanglibc/build/crypt
7 # -I /root/wip/nyanglibc/builds/0/nyanglibc/build
8 # -I ../sysdeps/unix/sysv/linux/x86_64/64
9 # -I ../sysdeps/unix/sysv/linux/x86_64
10 # -I ../sysdeps/unix/sysv/linux/x86/include
11 # -I ../sysdeps/unix/sysv/linux/x86 -I ../sysdeps/x86/nptl
12 # -I ../sysdeps/unix/sysv/linux/wordsize-64 -I ../sysdeps/x86_64/nptl
13 # -I ../sysdeps/unix/sysv/linux/include -I ../sysdeps/unix/sysv/linux
14 # -I ../sysdeps/nptl -I ../sysdeps/pthread -I ../sysdeps/gnu
15 # -I ../sysdeps/unix/inet -I ../sysdeps/unix/sysv -I ../sysdeps/unix/x86_64
16 # -I ../sysdeps/unix -I ../sysdeps/posix -I ../sysdeps/x86_64/64
17 # -I ../sysdeps/x86_64/fpu -I ../sysdeps/x86/fpu -I ../sysdeps/x86_64
18 # -I ../sysdeps/x86/include -I ../sysdeps/x86
19 # -I ../sysdeps/ieee754/float128 -I ../sysdeps/ieee754/ldbl-96/include
20 # -I ../sysdeps/ieee754/ldbl-96 -I ../sysdeps/ieee754/dbl-64
21 # -I ../sysdeps/ieee754/flt-32 -I ../sysdeps/wordsize-64
22 # -I ../sysdeps/ieee754 -I ../sysdeps/generic -I .. -I ../libio -I .
23 # -MD /run/asm/crypt/crypt_util.v.d -MF /run/asm/crypt/crypt_util.o.dt -MP
24 # -MT /run/asm/crypt/.o -D _LIBC_REENTRANT -D MODULE_NAME=libcrypt -D PIC
25 # -D TOP_NAMESPACE=glibc
26 # -include /root/wip/nyanglibc/builds/0/nyanglibc/build/libc-modules.h
27 # -include ../include/libc-symbols.h crypt_util.c -mtune=generic
28 # -march=x86-64 -auxbase-strip /run/asm/crypt/crypt_util.v.s -O2 -Wall
29 # -Wwrite-strings -Wundef -Werror -Wstrict-prototypes
30 # -Wold-style-definition -std=gnu11 -fverbose-asm -fgnu89-inline
31 # -fmerge-all-constants -frounding-math -fno-stack-protector -fmath-errno
32 # -fpie -ftls-model=initial-exec
33 # options enabled: -faggressive-loop-optimizations -falign-labels
34 # -fasynchronous-unwind-tables -fauto-inc-dec -fbranch-count-reg
35 # -fcaller-saves -fchkp-check-incomplete-type -fchkp-check-read
36 # -fchkp-check-write -fchkp-instrument-calls -fchkp-narrow-bounds
37 # -fchkp-optimize -fchkp-store-bounds -fchkp-use-static-bounds
38 # -fchkp-use-static-const-bounds -fchkp-use-wrappers -fcode-hoisting
39 # -fcombine-stack-adjustments -fcommon -fcompare-elim -fcprop-registers
40 # -fcrossjumping -fcse-follow-jumps -fdefer-pop
41 # -fdelete-null-pointer-checks -fdevirtualize -fdevirtualize-speculatively
42 # -fdwarf2-cfi-asm -fearly-inlining -feliminate-unused-debug-types
43 # -fexpensive-optimizations -fforward-propagate -ffp-int-builtin-inexact
44 # -ffunction-cse -fgcse -fgcse-lm -fgnu-runtime -fgnu-unique
45 # -fguess-branch-probability -fhoist-adjacent-loads -fident -fif-conversion
46 # -fif-conversion2 -findirect-inlining -finline -finline-atomics
47 # -finline-functions-called-once -finline-small-functions -fipa-bit-cp
48 # -fipa-cp -fipa-icf -fipa-icf-functions -fipa-icf-variables -fipa-profile
49 # -fipa-pure-const -fipa-ra -fipa-reference -fipa-sra -fipa-vrp
50 # -fira-hoist-pressure -fira-share-save-slots -fira-share-spill-slots
51 # -fisolate-erroneous-paths-dereference -fivopts -fkeep-static-consts
52 # -fleading-underscore -flifetime-dse -flra-remat -flto-odr-type-merging
53 # -fmath-errno -fmerge-all-constants -fmerge-debug-strings
54 # -fmove-loop-invariants -fomit-frame-pointer -foptimize-sibling-calls
55 # -foptimize-strlen -fpartial-inlining -fpeephole -fpeephole2 -fpic -fpie
56 # -fplt -fprefetch-loop-arrays -free -freg-struct-return -freorder-blocks
57 # -freorder-functions -frerun-cse-after-loop -frounding-math
58 # -fsched-critical-path-heuristic -fsched-dep-count-heuristic
59 # -fsched-group-heuristic -fsched-interblock -fsched-last-insn-heuristic
60 # -fsched-rank-heuristic -fsched-spec -fsched-spec-insn-heuristic
61 # -fsched-stalled-insns-dep -fschedule-fusion -fschedule-insns2
62 # -fsemantic-interposition -fshow-column -fshrink-wrap
63 # -fshrink-wrap-separate -fsigned-zeros -fsplit-ivs-in-unroller
64 # -fsplit-wide-types -fssa-backprop -fssa-phiopt -fstdarg-opt
65 # -fstore-merging -fstrict-aliasing -fstrict-overflow
66 # -fstrict-volatile-bitfields -fsync-libcalls -fthread-jumps
67 # -ftoplevel-reorder -ftrapping-math -ftree-bit-ccp -ftree-builtin-call-dce
68 # -ftree-ccp -ftree-ch -ftree-coalesce-vars -ftree-copy-prop -ftree-cselim
69 # -ftree-dce -ftree-dominator-opts -ftree-dse -ftree-forwprop -ftree-fre
70 # -ftree-loop-if-convert -ftree-loop-im -ftree-loop-ivcanon
71 # -ftree-loop-optimize -ftree-parallelize-loops= -ftree-phiprop -ftree-pre
72 # -ftree-pta -ftree-reassoc -ftree-scev-cprop -ftree-sink -ftree-slsr
73 # -ftree-sra -ftree-switch-conversion -ftree-tail-merge -ftree-ter
74 # -ftree-vrp -funit-at-a-time -funwind-tables -fverbose-asm
75 # -fzero-initialized-in-bss -m128bit-long-double -m64 -m80387
76 # -malign-stringops -mavx256-split-unaligned-load
77 # -mavx256-split-unaligned-store -mfancy-math-387 -mfp-ret-in-387 -mfxsr
78 # -mglibc -mieee-fp -mlong-double-80 -mmmx -mno-sse4 -mpush-args -mred-zone
79 # -msse -msse2 -mstv -mtls-direct-seg-refs -mvzeroupper
81 .text
82 .p2align 4,,15
83 .globl __init_des_r
84 .type __init_des_r, @function
85 __init_des_r:
86 .LFB47:
87 .cfi_startproc
88 pushq %r15 #
89 .cfi_def_cfa_offset 16
90 .cfi_offset 15, -16
91 pushq %r14 #
92 .cfi_def_cfa_offset 24
93 .cfi_offset 14, -24
94 # crypt_util.c:346: sb[0] = (long64*)__data->sb0; sb[1] = (long64*)__data->sb1;
95 leaq 32896(%rdi), %rax #, tmp400
96 # crypt_util.c:331: {
97 pushq %r13 #
98 .cfi_def_cfa_offset 32
99 .cfi_offset 13, -32
100 pushq %r12 #
101 .cfi_def_cfa_offset 40
102 .cfi_offset 12, -40
103 pushq %rbp #
104 .cfi_def_cfa_offset 48
105 .cfi_offset 6, -48
106 pushq %rbx #
107 .cfi_def_cfa_offset 56
108 .cfi_offset 3, -56
109 # crypt_util.c:346: sb[0] = (long64*)__data->sb0; sb[1] = (long64*)__data->sb1;
110 leaq 128(%rdi), %rbp #, _1
111 # crypt_util.c:331: {
112 subq $344, %rsp #,
113 .cfi_def_cfa_offset 400
114 # crypt_util.c:346: sb[0] = (long64*)__data->sb0; sb[1] = (long64*)__data->sb1;
115 movq %rax, 56(%rsp) # tmp400, sb
116 # crypt_util.c:347: sb[2] = (long64*)__data->sb2; sb[3] = (long64*)__data->sb3;
117 leaq 65664(%rdi), %rax #, tmp401
118 # crypt_util.c:331: {
119 movq %rdi, 40(%rsp) # __data, %sfp
120 # crypt_util.c:346: sb[0] = (long64*)__data->sb0; sb[1] = (long64*)__data->sb1;
121 movq %rbp, 48(%rsp) # _1, sb
122 # crypt_util.c:347: sb[2] = (long64*)__data->sb2; sb[3] = (long64*)__data->sb3;
123 movq %rax, 64(%rsp) # tmp401, sb
124 leaq 98432(%rdi), %rax #, tmp402
125 movq %rax, 72(%rsp) # tmp402, sb
126 # crypt_util.c:350: if(small_tables_initialized == 0) {
127 movl small_tables_initialized.7488(%rip), %eax # small_tables_initialized, small_tables_initialized.0_5
128 testl %eax, %eax # small_tables_initialized.0_5
129 jne .L2 #,
130 # crypt_util.c:352: __libc_lock_lock (_ufc_tables_lock);
131 cmpq $0, __pthread_mutex_lock@GOTPCREL(%rip) #,
132 je .L3 #,
133 # crypt_util.c:352: __libc_lock_lock (_ufc_tables_lock);
134 leaq _ufc_tables_lock(%rip), %rdi #,
135 call __pthread_mutex_lock@PLT #
136 .L3:
137 # crypt_util.c:353: if(small_tables_initialized)
138 movl small_tables_initialized.7488(%rip), %eax # small_tables_initialized, small_tables_initialized.2_6
139 leaq eperm32tab(%rip), %rbx #, tmp698
140 testl %eax, %eax # small_tables_initialized.2_6
141 je .L55 #,
142 .L4:
143 # crypt_util.c:460: __libc_lock_unlock(_ufc_tables_lock);
144 cmpq $0, __pthread_mutex_unlock@GOTPCREL(%rip) #,
145 je .L20 #,
146 # crypt_util.c:460: __libc_lock_unlock(_ufc_tables_lock);
147 leaq _ufc_tables_lock(%rip), %rdi #,
148 call __pthread_mutex_unlock@PLT #
149 .L20:
150 # crypt_util.c:481: _ufc_clearmem(__data->sb0,
151 movl $131072, %edx #,
152 xorl %esi, %esi #
153 movq %rbp, %rdi # _1,
154 call memset@PLT #
155 leaq sbox(%rip), %r12 #, tmp694
156 leaq 48(%rsp), %rax #, tmp835
157 movl $24, %r13d #, ivtmp.80
158 movl $0, 28(%rsp) #, %sfp
159 movl $15, 24(%rsp) #, %sfp
160 movl %r13d, %ecx # ivtmp.80, ivtmp.80
161 movq %rax, 32(%rsp) # tmp835, %sfp
162 movq %r12, 16(%rsp) # tmp694, %sfp
163 movl $14, %eax #, pretmp_510
164 .L21:
165 movslq 28(%rsp), %rdx # %sfp,
166 # crypt_util.c:519: sb[sg][inx] =
167 movq 32(%rsp), %rdi # %sfp, ivtmp.74
168 # crypt_util.c:497: for(j1 = 0; j1 < 64; j1++) {
169 movl $0, 4(%rsp) #, %sfp
170 # crypt_util.c:519: sb[sg][inx] =
171 movq (%rdi), %r14 # MEM[base: _454, offset: 0B], _78
172 leal 1(%rdx), %r15d #, _451
173 leaq 0(,%rdx,4), %rdi #, tmp712
174 movslq %r15d, %r15 # _451, _451
175 movq %rdi, 8(%rsp) # tmp712, %sfp
176 salq $2, %r15 #, tmp707
177 .p2align 4,,10
178 .p2align 3
179 .L26:
180 movl 4(%rsp), %ebp # %sfp, _465
181 # crypt_util.c:503: to_permute = (((ufc_long)s1 << 4) |
182 movl 24(%rsp), %edx # %sfp, pretmp_524
183 movslq %eax, %r11 # pretmp_510, pretmp_510
184 salq $4, %r11 #, _64
185 # crypt_util.c:499: for(j2 = 0; j2 < 64; j2++) {
186 xorl %r9d, %r9d # j2
187 sall $6, %ebp #, _465
188 movslq %edx, %rax # pretmp_524,
189 jmp .L23 #
190 .p2align 4,,10
191 .p2align 3
192 .L56:
193 movl %r9d, %eax # j2, tmp680
194 movl %r9d, %esi # j2, tmp686
195 sarl %eax # tmp680
196 andl $1, %esi #, tmp686
197 movl %eax, %edx # tmp680, tmp682
198 movl %r9d, %eax # j2, tmp684
199 sarl $4, %eax #, tmp684
200 andl $15, %edx #, tmp682
201 andl $2, %eax #, tmp685
202 orl %esi, %eax # tmp686, tmp687
203 cltq
204 addq %r15, %rax # tmp707, tmp690
205 salq $4, %rax #, tmp691
206 addq %rdx, %rax # tmp682, tmp692
207 movslq (%r12,%rax,4), %rax # sbox,
208 .L23:
209 # crypt_util.c:503: to_permute = (((ufc_long)s1 << 4) |
210 orq %r11, %rax # _64, tmp626
211 # crypt_util.c:518: inx = ((j1 << 6) | j2);
212 movl %r9d, %esi # j2, tmp630
213 # crypt_util.c:499: for(j2 = 0; j2 < 64; j2++) {
214 addl $1, %r9d #, j2
215 # crypt_util.c:503: to_permute = (((ufc_long)s1 << 4) |
216 salq %cl, %rax # ivtmp.80, to_permute
217 # crypt_util.c:518: inx = ((j1 << 6) | j2);
218 orl %ebp, %esi # _465, tmp630
219 # crypt_util.c:523: ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
220 movq %rax, %rdx # to_permute, tmp628
221 # crypt_util.c:520: ((long64)eperm32tab[0][(to_permute >> 24) & 0xff][0] << 32) |
222 movq %rax, %r10 # to_permute, _74
223 # crypt_util.c:526: ((long64)eperm32tab[2][(to_permute >> 8) & 0xff][0] << 32) |
224 movzbl %ah, %edi # to_permute, _90
225 # crypt_util.c:523: ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
226 shrq $16, %rdx #, tmp628
227 # crypt_util.c:520: ((long64)eperm32tab[0][(to_permute >> 24) & 0xff][0] << 32) |
228 shrq $20, %r10 #, _74
229 # crypt_util.c:529: ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) |
230 movzbl %al, %eax # to_permute, _96
231 # crypt_util.c:523: ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
232 movzbl %dl, %edx # tmp628, _83
233 # crypt_util.c:521: (long64)eperm32tab[0][(to_permute >> 24) & 0xff][1];
234 andl $4080, %r10d #, tmp633
235 # crypt_util.c:518: inx = ((j1 << 6) | j2);
236 movslq %esi, %rsi # tmp630, inx
237 # crypt_util.c:524: (long64)eperm32tab[1][(to_permute >> 16) & 0xff][1];
238 movq %rdx, %r13 # _83, tmp637
239 # crypt_util.c:521: (long64)eperm32tab[0][(to_permute >> 24) & 0xff][1];
240 addq %rbx, %r10 # tmp698, tmp634
241 # crypt_util.c:523: ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
242 addq $256, %rdx #, tmp666
243 # crypt_util.c:524: (long64)eperm32tab[1][(to_permute >> 16) & 0xff][1];
244 salq $4, %r13 #, tmp637
245 # crypt_util.c:528: sb[sg][inx] |=
246 movq 8(%r10), %r8 # eperm32tab, tmp641
247 # crypt_util.c:523: ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
248 salq $4, %rdx #, tmp667
249 # crypt_util.c:528: sb[sg][inx] |=
250 orq 4104(%rbx,%r13), %r8 # eperm32tab, tmp640
251 # crypt_util.c:530: (long64)eperm32tab[3][(to_permute) & 0xff][1];
252 movq %rax, %r13 # _96, tmp643
253 # crypt_util.c:529: ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) |
254 addq $768, %rax #, tmp653
255 # crypt_util.c:530: (long64)eperm32tab[3][(to_permute) & 0xff][1];
256 salq $4, %r13 #, tmp643
257 # crypt_util.c:529: ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) |
258 salq $4, %rax #, tmp654
259 # crypt_util.c:528: sb[sg][inx] |=
260 orq 12296(%rbx,%r13), %r8 # eperm32tab, tmp646
261 # crypt_util.c:527: (long64)eperm32tab[2][(to_permute >> 8) & 0xff][1];
262 movq %rdi, %r13 # _90, tmp648
263 # crypt_util.c:529: ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) |
264 movq (%rbx,%rax), %rax # eperm32tab, tmp657
265 # crypt_util.c:527: (long64)eperm32tab[2][(to_permute >> 8) & 0xff][1];
266 salq $4, %r13 #, tmp648
267 # crypt_util.c:528: sb[sg][inx] |=
268 orq 8200(%rbx,%r13), %r8 # eperm32tab, tmp651
269 # crypt_util.c:529: ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) |
270 salq $32, %rax #, tmp656
271 # crypt_util.c:528: sb[sg][inx] |=
272 orq %rax, %r8 # tmp656, tmp658
273 # crypt_util.c:520: ((long64)eperm32tab[0][(to_permute >> 24) & 0xff][0] << 32) |
274 movq (%r10), %rax # eperm32tab, tmp663
275 salq $32, %rax #, tmp662
276 # crypt_util.c:528: sb[sg][inx] |=
277 orq %rax, %r8 # tmp662, tmp664
278 # crypt_util.c:523: ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
279 movq (%rbx,%rdx), %rax # eperm32tab, tmp670
280 salq $32, %rax #, tmp669
281 # crypt_util.c:528: sb[sg][inx] |=
282 orq %rax, %r8 # tmp669, tmp671
283 # crypt_util.c:526: ((long64)eperm32tab[2][(to_permute >> 8) & 0xff][0] << 32) |
284 leaq 512(%rdi), %rax #, tmp673
285 salq $4, %rax #, tmp674
286 movq (%rbx,%rax), %rax # eperm32tab, tmp677
287 salq $32, %rax #, tmp676
288 # crypt_util.c:528: sb[sg][inx] |=
289 orq %rax, %r8 # tmp676, tmp678
290 # crypt_util.c:499: for(j2 = 0; j2 < 64; j2++) {
291 cmpl $64, %r9d #, j2
292 # crypt_util.c:528: sb[sg][inx] |=
293 movq %r8, (%r14,%rsi,8) # tmp678, *_80
294 # crypt_util.c:499: for(j2 = 0; j2 < 64; j2++) {
295 jne .L56 #,
296 # crypt_util.c:497: for(j1 = 0; j1 < 64; j1++) {
297 addl $1, 4(%rsp) #, %sfp
298 movl 4(%rsp), %eax # %sfp, j1
299 cmpl $64, %eax #, j1
300 je .L57 #,
301 movl 4(%rsp), %edi # %sfp, j1
302 movl %edi, %eax # j1, tmp611
303 sarl %eax # tmp611
304 movl %eax, %edx # tmp611, tmp613
305 movl %edi, %eax # j1, tmp615
306 andl $1, %edi #, tmp617
307 sarl $4, %eax #, tmp615
308 andl $15, %edx #, tmp613
309 andl $2, %eax #, tmp616
310 orl %edi, %eax # tmp617, tmp618
311 cltq
312 addq 8(%rsp), %rax # %sfp, tmp621
313 salq $4, %rax #, tmp622
314 addq %rdx, %rax # tmp613, tmp623
315 movl (%r12,%rax,4), %eax # sbox, pretmp_510
316 jmp .L26 #
317 .L57:
318 subl $8, %ecx #, ivtmp.80
319 addq $512, 16(%rsp) #, %sfp
320 addq $8, 32(%rsp) #, %sfp
321 movq 16(%rsp), %rax # %sfp, ivtmp.78
322 addl $2, 28(%rsp) #, %sfp
323 # crypt_util.c:493: for(sg = 0; sg < 4; sg++) {
324 cmpl $-8, %ecx #, ivtmp.80
325 je .L25 #,
326 movl 256(%rax), %edi # MEM[base: _450, offset: 256B], prephitmp_487
327 movl (%rax), %eax # MEM[base: _450, offset: 0B], pretmp_510
328 movl %edi, 24(%rsp) # prephitmp_487, %sfp
329 jmp .L21 #
330 .L25:
331 # crypt_util.c:537: __data->current_salt[0] = 0;
332 movq 40(%rsp), %rax # %sfp, __data
333 # crypt_util.c:536: __data->current_saltbits = 0;
334 xorl %edx, %edx #
335 # crypt_util.c:537: __data->current_salt[0] = 0;
336 movq $0, 131214(%rax) #, MEM[(void *)__data_129(D) + 131214B]
337 # crypt_util.c:536: __data->current_saltbits = 0;
338 movw %dx, 131222(%rax) #, MEM[(long int *)__data_129(D) + 131222B]
339 # crypt_util.c:539: __data->initialized++;
340 addl $1, 131228(%rax) #, *__data_129(D).initialized
341 # crypt_util.c:540: }
342 addq $344, %rsp #,
343 .cfi_remember_state
344 .cfi_def_cfa_offset 56
345 popq %rbx #
346 .cfi_def_cfa_offset 48
347 popq %rbp #
348 .cfi_def_cfa_offset 40
349 popq %r12 #
350 .cfi_def_cfa_offset 32
351 popq %r13 #
352 .cfi_def_cfa_offset 24
353 popq %r14 #
354 .cfi_def_cfa_offset 16
355 popq %r15 #
356 .cfi_def_cfa_offset 8
358 .L55:
359 .cfi_restore_state
360 # crypt_util.c:362: _ufc_clearmem((char*)do_pc1, (int)sizeof(do_pc1));
361 leaq do_pc1(%rip), %rdi #,
362 xorl %esi, %esi #
363 movl $16384, %edx #,
364 leaq bytemask(%rip), %r14 #, tmp697
365 leaq longmask(%rip), %r13 #, tmp699
366 call memset@PLT #
367 leaq pc1(%rip), %r9 #, tmp701
368 leaq do_pc1(%rip), %r8 #, tmp704
369 xorl %esi, %esi # ivtmp.157
370 # crypt_util.c:366: mask2 = longmask[bit % 28 + 4];
371 movl $613566757, %edi #, tmp426
372 .L8:
373 # crypt_util.c:364: comes_from_bit = pc1[bit] - 1;
374 movl (%r9,%rsi,4), %r11d # MEM[symbol: pc1, index: _384, offset: 0B], MEM[symbol: pc1, index: _384, offset: 0B]
375 # crypt_util.c:366: mask2 = longmask[bit % 28 + 4];
376 movl %esi, %r15d # ivtmp.157, tmp428
377 # crypt_util.c:364: comes_from_bit = pc1[bit] - 1;
378 leal -1(%r11), %ebx #, comes_from_bit
379 # crypt_util.c:365: mask1 = bytemask[comes_from_bit % 8 + 1];
380 movl %ebx, %edx # comes_from_bit, tmp415
381 sarl $31, %edx #, tmp415
382 shrl $29, %edx #, tmp416
383 leal (%rbx,%rdx), %eax #, tmp417
384 andl $7, %eax #, tmp418
385 subl %edx, %eax # tmp416, tmp419
386 # crypt_util.c:366: mask2 = longmask[bit % 28 + 4];
387 movl %esi, %edx # ivtmp.157, tmp424
388 # crypt_util.c:365: mask1 = bytemask[comes_from_bit % 8 + 1];
389 addl $1, %eax #, tmp420
390 # crypt_util.c:366: mask2 = longmask[bit % 28 + 4];
391 shrl $2, %edx #, tmp424
392 # crypt_util.c:365: mask1 = bytemask[comes_from_bit % 8 + 1];
393 cltq
394 movzbl (%r14,%rax), %ecx # bytemask, mask1
395 # crypt_util.c:366: mask2 = longmask[bit % 28 + 4];
396 movl %edx, %eax # tmp424, tmp424
397 mull %edi # tmp426
398 imull $28, %edx, %eax #, tmp425, tmp427
399 movslq %edx, %rdx # tmp425, tmp455
400 subl %eax, %r15d # tmp427, tmp428
401 movl %r15d, %eax # tmp428, tmp428
402 addl $4, %eax #, tmp437
403 # crypt_util.c:369: do_pc1[comes_from_bit / 8][bit / 28][j] |= mask2;
404 testl %ebx, %ebx # comes_from_bit
405 # crypt_util.c:366: mask2 = longmask[bit % 28 + 4];
406 cltq
407 movq 0(%r13,%rax,8), %r10 # longmask, mask2
408 # crypt_util.c:369: do_pc1[comes_from_bit / 8][bit / 28][j] |= mask2;
409 leal 6(%r11), %eax #, tmp441
410 cmovns %ebx, %eax # tmp441,, comes_from_bit, comes_from_bit
411 sarl $3, %eax #, tmp442
412 cltq
413 leaq (%rdx,%rax,2), %rdx #, tmp456
414 # crypt_util.c:367: for(j = 0; j < 128; j++) {
415 xorl %eax, %eax # j
416 salq $10, %rdx #, tmp457
417 addq %r8, %rdx # tmp704, _394
418 .p2align 4,,10
419 .p2align 3
420 .L5:
421 # crypt_util.c:367: for(j = 0; j < 128; j++) {
422 addq $1, %rax #, j
423 cmpq $128, %rax #, j
424 je .L58 #,
425 .L7:
426 # crypt_util.c:368: if(j & mask1)
427 testq %rax, %rcx # j, mask1
428 je .L5 #,
429 # crypt_util.c:369: do_pc1[comes_from_bit / 8][bit / 28][j] |= mask2;
430 orq %r10, (%rdx,%rax,8) # mask2, MEM[base: _394, index: _395, offset: 0B]
431 # crypt_util.c:367: for(j = 0; j < 128; j++) {
432 addq $1, %rax #, j
433 cmpq $128, %rax #, j
434 jne .L7 #,
435 .L58:
436 addq $1, %rsi #, ivtmp.157
437 # crypt_util.c:363: for(bit = 0; bit < 56; bit++) {
438 cmpq $56, %rsi #, ivtmp.157
439 jne .L8 #,
440 # crypt_util.c:378: _ufc_clearmem((char*)do_pc2, (int)sizeof(do_pc2));
441 leaq do_pc2(%rip), %r8 #, tmp705
442 movl $1024, %ecx #, tmp463
443 xorl %eax, %eax # tmp462
444 leaq pc2(%rip), %r10 #, tmp702
445 leaq BITMASK(%rip), %r12 #, tmp703
446 xorl %esi, %esi # ivtmp.142
447 movq %r8, %rdi # tmp705, tmp461
448 # crypt_util.c:381: mask1 = bytemask[comes_from_bit % 7 + 1];
449 movl $-1840700269, %r9d #, tmp470
450 # crypt_util.c:378: _ufc_clearmem((char*)do_pc2, (int)sizeof(do_pc2));
451 rep stosq
452 # crypt_util.c:382: mask2 = BITMASK[bit % 24];
453 movl $-1431655765, %edi #, tmp484
454 .L12:
455 # crypt_util.c:380: comes_from_bit = pc2[bit] - 1;
456 movl (%r10,%rsi,4), %eax # MEM[symbol: pc2, index: _401, offset: 0B], tmp764
457 # crypt_util.c:382: mask2 = BITMASK[bit % 24];
458 movl %esi, %ebx # ivtmp.142, tmp489
459 # crypt_util.c:380: comes_from_bit = pc2[bit] - 1;
460 leal -1(%rax), %r11d #, comes_from_bit
461 # crypt_util.c:381: mask1 = bytemask[comes_from_bit % 7 + 1];
462 movl %r11d, %eax # comes_from_bit, tmp751
463 imull %r9d # tmp470
464 movl %r11d, %eax # comes_from_bit, tmp473
465 sarl $31, %eax #, tmp473
466 leal (%rdx,%r11), %ecx #, tmp471
467 sarl $2, %ecx #, tmp472
468 subl %eax, %ecx # tmp473, tmp468
469 leal 0(,%rcx,8), %eax #, tmp475
470 subl %ecx, %eax # tmp468, tmp476
471 subl %eax, %r11d # tmp476, comes_from_bit
472 movl %r11d, %eax # comes_from_bit, tmp477
473 addl $1, %eax #, tmp478
474 cltq
475 movzbl (%r14,%rax), %r11d # bytemask, mask1
476 # crypt_util.c:382: mask2 = BITMASK[bit % 24];
477 movl %esi, %eax # ivtmp.142, tmp752
478 mull %edi # tmp484
479 movl %edx, %eax # tmp483, tmp483
480 movslq %ecx, %rdx # tmp468, tmp508
481 shrl $4, %eax #, tmp483
482 salq $10, %rdx #, tmp509
483 leal (%rax,%rax,2), %eax #, tmp487
484 addq %r8, %rdx # tmp705, _408
485 sall $3, %eax #, tmp488
486 subl %eax, %ebx # tmp488, tmp489
487 movslq %ebx, %rax # tmp489,
488 movq (%r12,%rax,8), %rbx # BITMASK, mask2
489 # crypt_util.c:383: for(j = 0; j < 128; j++) {
490 xorl %eax, %eax # j
491 .p2align 4,,10
492 .p2align 3
493 .L9:
494 # crypt_util.c:383: for(j = 0; j < 128; j++) {
495 addq $1, %rax #, j
496 cmpq $128, %rax #, j
497 je .L59 #,
498 .L11:
499 # crypt_util.c:384: if(j & mask1)
500 testq %rax, %r11 # j, mask1
501 je .L9 #,
502 # crypt_util.c:385: do_pc2[comes_from_bit / 7][j] |= mask2;
503 orq %rbx, (%rdx,%rax,8) # mask2, MEM[base: _408, index: _409, offset: 0B]
504 # crypt_util.c:383: for(j = 0; j < 128; j++) {
505 addq $1, %rax #, j
506 cmpq $128, %rax #, j
507 jne .L11 #,
508 .L59:
509 addq $1, %rsi #, ivtmp.142
510 # crypt_util.c:379: for(bit = 0; bit < 48; bit++) {
511 cmpq $48, %rsi #, ivtmp.142
512 jne .L12 #,
513 # crypt_util.c:401: _ufc_clearmem((char*)eperm32tab, (int)sizeof(eperm32tab));
514 leaq eperm32tab(%rip), %rdi #,
515 xorl %esi, %esi #
516 movl $16384, %edx #,
517 leaq eperm32tab(%rip), %rbx #, tmp698
518 call memset@PLT #
519 leaq perm32(%rip), %r9 #, tmp693
520 leaq esel(%rip), %rdi #, tmp696
521 xorl %esi, %esi # ivtmp.127
522 # crypt_util.c:408: eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK[bit % 24];
523 movl $-1431655765, %r8d #, tmp528
524 .L15:
525 # crypt_util.c:404: comes_from = perm32[esel[bit]-1]-1;
526 movl (%rdi,%rsi,4), %eax # MEM[symbol: esel, index: _412, offset: 0B], tmp771
527 subl $1, %eax #, tmp520
528 cltq
529 movl (%r9,%rax,4), %r10d # perm32, tmp523
530 subl $1, %r10d #, _27
531 # crypt_util.c:405: mask1 = bytemask[comes_from % 8];
532 movl %r10d, %eax # _27, tmp526
533 andl $7, %eax #, tmp526
534 movzbl (%r14,%rax), %ecx # bytemask, mask1
535 # crypt_util.c:408: eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK[bit % 24];
536 movl %esi, %eax # ivtmp.127, tmp753
537 mull %r8d # tmp528
538 movl %edx, %r15d # tmp527, tmp527
539 shrl $4, %r15d #, _32
540 movslq %r15d, %rdx # _32,
541 movl %esi, %r15d # ivtmp.127, _32
542 leal (%rdx,%rdx,2), %eax #, tmp531
543 sall $3, %eax #, tmp532
544 subl %eax, %r15d # tmp532, _32
545 # crypt_util.c:404: comes_from = perm32[esel[bit]-1]-1;
546 movslq %r10d, %rax # _27, comes_from
547 # crypt_util.c:408: eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK[bit % 24];
548 shrq $3, %rax #, tmp542
549 movslq %r15d, %r15 # _32, _32
550 salq $9, %rax #, tmp543
551 addq %rdx, %rax # _32, tmp553
552 leaq (%rbx,%rax,8), %r10 #, _421
553 # crypt_util.c:406: for(j = 256; j--;) {
554 movl $255, %eax #, j
555 .p2align 4,,10
556 .p2align 3
557 .L14:
558 # crypt_util.c:407: if(j & mask1)
559 testq %rax, %rcx # j, mask1
560 je .L13 #,
561 movq %rax, %rdx # j, _422
562 # crypt_util.c:408: eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK[bit % 24];
563 movq (%r12,%r15,8), %r11 # BITMASK, tmp559
564 salq $4, %rdx #, _422
565 orq %r11, (%r10,%rdx) # tmp559, MEM[base: _421, index: _422, offset: 0B]
566 .L13:
567 # crypt_util.c:406: for(j = 256; j--;) {
568 subq $1, %rax #, j
569 cmpq $-1, %rax #, j
570 jne .L14 #,
571 addq $1, %rsi #, ivtmp.127
572 # crypt_util.c:402: for(bit = 0; bit < 48; bit++) {
573 cmpq $48, %rsi #, ivtmp.127
574 jne .L15 #,
575 movl $47, %eax #, ivtmp.112
576 .L16:
577 # crypt_util.c:417: e_inverse[esel[bit] - 1 ] = bit;
578 movl (%rdi,%rax,4), %edx # MEM[symbol: esel, index: _430, offset: 0B], _36
579 leal -1(%rdx), %ecx #, tmp562
580 # crypt_util.c:418: e_inverse[esel[bit] - 1 + 32] = bit + 48;
581 addl $31, %edx #, tmp564
582 movslq %edx, %rdx # tmp564, tmp565
583 # crypt_util.c:417: e_inverse[esel[bit] - 1 ] = bit;
584 movslq %ecx, %rcx # tmp562, tmp563
585 movl %eax, 80(%rsp,%rcx,4) # ivtmp.112, e_inverse
586 # crypt_util.c:418: e_inverse[esel[bit] - 1 + 32] = bit + 48;
587 leal 48(%rax), %ecx #, tmp566
588 subq $1, %rax #, ivtmp.112
589 # crypt_util.c:416: for(bit=48; bit--;) {
590 cmpq $-1, %rax #, ivtmp.112
591 # crypt_util.c:418: e_inverse[esel[bit] - 1 + 32] = bit + 48;
592 movl %ecx, 80(%rsp,%rdx,4) # tmp566, e_inverse
593 # crypt_util.c:416: for(bit=48; bit--;) {
594 jne .L16 #,
595 # crypt_util.c:425: _ufc_clearmem((char*)efp, (int)sizeof efp);
596 leaq efp(%rip), %rdi #,
597 movl $16384, %edx #,
598 xorl %esi, %esi #
599 call memset@PLT #
600 leaq final_perm(%rip), %r11 #, tmp700
601 leaq efp(%rip), %r10 #, tmp695
602 xorl %r8d, %r8d # ivtmp.101
603 # crypt_util.c:446: bit_within_word = comes_from_e_bit % 6; /* 0..5 */
604 movl $715827883, %r9d #, tmp580
605 .L19:
606 # crypt_util.c:443: comes_from_f_bit = final_perm[bit] - 1; /* 0..63 */
607 movl (%r11,%r8,4), %eax # MEM[symbol: final_perm, index: _433, offset: 0B], tmp777
608 subl $1, %eax #, comes_from_f_bit
609 # crypt_util.c:444: comes_from_e_bit = e_inverse[comes_from_f_bit]; /* 0..95 */
610 cltq
611 movl 80(%rsp,%rax,4), %ecx # e_inverse, comes_from_e_bit
612 # crypt_util.c:446: bit_within_word = comes_from_e_bit % 6; /* 0..5 */
613 movl %ecx, %eax # comes_from_e_bit, tmp754
614 imull %r9d # tmp580
615 movl %ecx, %eax # comes_from_e_bit, tmp581
616 sarl $31, %eax #, tmp581
617 subl %eax, %edx # tmp581, tmp579
618 movslq %edx, %rax # tmp579,
619 leal (%rax,%rax,2), %edx #, tmp584
620 salq $7, %rax #, tmp597
621 addl %edx, %edx # tmp585
622 subl %edx, %ecx # tmp585, comes_from_e_bit
623 movl %ecx, %edx # comes_from_e_bit, bit_within_word
624 # crypt_util.c:448: mask1 = longmask[bit_within_word + 26];
625 addl $26, %edx #, tmp587
626 movslq %edx, %rdx # tmp587, tmp588
627 movq 0(%r13,%rdx,8), %rsi # longmask, mask1
628 # crypt_util.c:449: mask2 = longmask[o_bit];
629 movq %r8, %rdx # ivtmp.101, o_bit
630 andl $31, %edx #, o_bit
631 movq 0(%r13,%rdx,8), %rdi # longmask, mask2
632 # crypt_util.c:433: o_long = bit / 32; /* 0..1 */
633 movl %r8d, %edx # ivtmp.101, o_long
634 sarl $5, %edx #, o_long
635 movslq %edx, %rdx # o_long, o_long
636 addq %rdx, %rax # o_long, tmp600
637 leaq (%r10,%rax,8), %rcx #, _443
638 # crypt_util.c:451: for(word_value = 64; word_value--;) {
639 movl $63, %eax #, word_value
640 .p2align 4,,10
641 .p2align 3
642 .L18:
643 # crypt_util.c:452: if(word_value & mask1)
644 testq %rax, %rsi # word_value, mask1
645 je .L17 #,
646 movq %rax, %rdx # word_value, _444
647 salq $4, %rdx #, _444
648 # crypt_util.c:453: efp[comes_from_word][word_value][o_long] |= mask2;
649 orq %rdi, (%rcx,%rdx) # mask2, MEM[base: _443, index: _444, offset: 0B]
650 .L17:
651 # crypt_util.c:451: for(word_value = 64; word_value--;) {
652 subq $1, %rax #, word_value
653 cmpq $-1, %rax #, word_value
654 jne .L18 #,
655 addq $1, %r8 #, ivtmp.101
656 # crypt_util.c:426: for(bit = 0; bit < 64; bit++) {
657 cmpq $64, %r8 #, ivtmp.101
658 jne .L19 #,
659 # crypt_util.c:456: atomic_write_barrier ();
660 # crypt_util.c:457: small_tables_initialized = 1;
661 movl $1, small_tables_initialized.7488(%rip) #, small_tables_initialized
662 jmp .L4 #
663 .L2:
664 # crypt_util.c:463: atomic_read_barrier ();
665 leaq eperm32tab(%rip), %rbx #, tmp698
666 jmp .L20 #
667 .cfi_endproc
668 .LFE47:
669 .size __init_des_r, .-__init_des_r
#-----------------------------------------------------------------------
# void __init_des(void)
# Convenience wrapper: loads the address of the shared static
# _ufc_foobar crypt_data block into %rdi and sibling-tail-calls
# __init_des_r, which then returns directly to our caller -- so this
# thunk needs no stack frame of its own.
# NOTE(review): same capture artifact as above -- each line carries a
# leftover listing line number (and the 'ret'-less jmp is intentional,
# it is a tail call).
#-----------------------------------------------------------------------
670 .p2align 4,,15
671 .globl __init_des
672 .type __init_des, @function
673 __init_des:
674 .LFB48:
675 .cfi_startproc
676 # crypt_util.c:545: __init_des_r(&_ufc_foobar);
677 leaq _ufc_foobar(%rip), %rdi #,
# Tail call: control never comes back here.
678 jmp __init_des_r #
679 .cfi_endproc
680 .LFE48:
681 .size __init_des, .-__init_des
682 .p2align 4,,15
683 .globl _ufc_setup_salt_r
684 .type _ufc_setup_salt_r, @function
685 _ufc_setup_salt_r:
686 .LFB51:
687 .cfi_startproc
688 pushq %rbx #
689 .cfi_def_cfa_offset 16
690 .cfi_offset 3, -16
691 movq %rdi, %rbx # s, s
692 subq $16, %rsp #,
693 .cfi_def_cfa_offset 32
694 # crypt_util.c:612: if(__data->initialized == 0)
695 movl 131228(%rsi), %eax # *__data_46(D).initialized,
696 testl %eax, %eax #
697 je .L99 #,
698 .L62:
699 # crypt_util.c:615: s0 = s[0];
700 movzbl (%rbx), %edx # *s_48(D), s0
701 # crypt_util.c:587: switch (c)
702 cmpb $90, %dl #, s0
703 jg .L64 #,
704 cmpb $65, %dl #, s0
705 jge .L65 #,
706 leal -46(%rdx), %eax #, tmp185
707 cmpb $11, %al #, tmp185
708 jbe .L65 #,
709 .L84:
710 # crypt_util.c:617: return false;
711 xorl %eax, %eax # <retval>
712 .L61:
713 # crypt_util.c:663: }
714 addq $16, %rsp #,
715 .cfi_remember_state
716 .cfi_def_cfa_offset 16
717 popq %rbx #
718 .cfi_def_cfa_offset 8
720 .p2align 4,,10
721 .p2align 3
722 .L64:
723 .cfi_restore_state
724 # crypt_util.c:587: switch (c)
725 leal -97(%rdx), %eax #, tmp186
726 cmpb $25, %al #, tmp186
727 ja .L84 #,
728 .L65:
729 # crypt_util.c:619: s1 = s[1];
730 movzbl 1(%rbx), %edi # MEM[(const char *)s_48(D) + 1B], pretmp_108
731 # crypt_util.c:587: switch (c)
732 cmpb $90, %dil #, pretmp_108
733 jg .L66 #,
734 cmpb $65, %dil #, pretmp_108
735 jge .L67 #,
736 leal -46(%rdi), %eax #, tmp187
737 cmpb $11, %al #, tmp187
738 ja .L84 #,
739 .L67:
740 # crypt_util.c:623: if(s0 == __data->current_salt[0] && s1 == __data->current_salt[1])
741 cmpb %dl, 131214(%rsi) # s0, *__data_46(D).current_salt
742 je .L100 #,
743 .L68:
744 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
745 cmpb $96, %dl #, s0
746 # crypt_util.c:626: __data->current_salt[0] = s0;
747 movb %dl, 131214(%rsi) # s0, *__data_46(D).current_salt
748 # crypt_util.c:627: __data->current_salt[1] = s1;
749 movb %dil, 131215(%rsi) # pretmp_108, *__data_46(D).current_salt
750 movsbl %dl, %eax # s0, _153
751 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
752 jg .L69 #,
753 cmpb $64, %dl #, s0
754 jg .L70 #,
755 subl $46, %eax #, tmp189
756 movslq %eax, %rdx # tmp189, iftmp.7_146
757 .L71:
758 # crypt_util.c:639: saltbits |= BITMASK[6 * i + j];
759 leaq BITMASK(%rip), %r8 #, tmp240
760 # crypt_util.c:634: saltbits = 0;
761 xorl %ecx, %ecx # saltbits
762 # crypt_util.c:637: for(j = 0; j < 6; j++) {
763 xorl %eax, %eax # j
764 .p2align 4,,10
765 .p2align 3
766 .L73:
767 # crypt_util.c:638: if((c >> j) & 0x1)
768 btq %rax, %rdx # j, iftmp.7_146
769 jnc .L72 #,
770 # crypt_util.c:639: saltbits |= BITMASK[6 * i + j];
771 orq (%r8,%rax,8), %rcx # MEM[symbol: BITMASK, index: _155, offset: 0B], saltbits
772 .L72:
773 # crypt_util.c:637: for(j = 0; j < 6; j++) {
774 addq $1, %rax #, j
775 cmpq $6, %rax #, j
776 jne .L73 #,
777 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
778 cmpb $96, %dil #, pretmp_108
779 movsbl %dil, %eax # pretmp_108, _176
780 jle .L74 #,
781 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
782 subl $59, %eax #, tmp197
783 movslq %eax, %rdx # tmp197, iftmp.7_42
784 .L75:
785 # crypt_util.c:639: saltbits |= BITMASK[6 * i + j];
786 leaq BITMASK(%rip), %rdi #, tmp239
787 # crypt_util.c:637: for(j = 0; j < 6; j++) {
788 xorl %eax, %eax # j
789 .p2align 4,,10
790 .p2align 3
791 .L78:
792 # crypt_util.c:638: if((c >> j) & 0x1)
793 btq %rax, %rdx # j, iftmp.7_42
794 jnc .L77 #,
795 # crypt_util.c:639: saltbits |= BITMASK[6 * i + j];
796 orq 48(%rdi,%rax,8), %rcx # MEM[symbol: BITMASK, index: _154, offset: 0B], saltbits
797 .L77:
798 # crypt_util.c:637: for(j = 0; j < 6; j++) {
799 addq $1, %rax #, j
800 cmpq $6, %rax #, j
801 jne .L78 #,
802 # crypt_util.c:655: shuffle_sb((LONGG)__data->sb0, __data->current_saltbits ^ saltbits);
803 movq 131216(%rsi), %r10 # *__data_46(D).current_saltbits, _21
804 leaq 128(%rsi), %rax #, k
805 leaq 32896(%rsi), %r9 #, k
806 xorq %rcx, %r10 # saltbits, _21
807 .p2align 4,,10
808 .p2align 3
809 .L79:
810 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
811 movq (%rax), %r8 # MEM[base: k_113, offset: 0B], _95
812 # crypt_util.c:575: *k++ ^= (x << 32) | x;
813 addq $8, %rax #, k
814 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
815 movq %r8, %rdx # _95, tmp206
816 shrq $32, %rdx #, tmp206
817 xorq %r8, %rdx # _95, tmp207
818 andq %r10, %rdx # _21, x
819 # crypt_util.c:575: *k++ ^= (x << 32) | x;
820 movq %rdx, %rdi # x, tmp208
821 salq $32, %rdi #, tmp208
822 orq %rdi, %rdx # tmp208, tmp209
823 xorq %r8, %rdx # _95, tmp210
824 # crypt_util.c:573: for(j=4096; j--;) {
825 cmpq %r9, %rax # k, k
826 # crypt_util.c:575: *k++ ^= (x << 32) | x;
827 movq %rdx, -8(%rax) # tmp210, MEM[base: k_99, offset: -8B]
828 # crypt_util.c:573: for(j=4096; j--;) {
829 jne .L79 #,
830 # crypt_util.c:656: shuffle_sb((LONGG)__data->sb1, __data->current_saltbits ^ saltbits);
831 movq 131216(%rsi), %r10 # *__data_46(D).current_saltbits, _25
832 leaq 65664(%rsi), %r9 #, k
833 xorq %rcx, %r10 # saltbits, _25
834 .p2align 4,,10
835 .p2align 3
836 .L80:
837 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
838 movq (%rax), %r8 # MEM[base: k_115, offset: 0B], _84
839 # crypt_util.c:575: *k++ ^= (x << 32) | x;
840 addq $8, %rax #, k
841 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
842 movq %r8, %rdx # _84, tmp211
843 shrq $32, %rdx #, tmp211
844 xorq %r8, %rdx # _84, tmp212
845 andq %r10, %rdx # _25, x
846 # crypt_util.c:575: *k++ ^= (x << 32) | x;
847 movq %rdx, %rdi # x, tmp213
848 salq $32, %rdi #, tmp213
849 orq %rdi, %rdx # tmp213, tmp214
850 xorq %r8, %rdx # _84, tmp215
851 # crypt_util.c:573: for(j=4096; j--;) {
852 cmpq %r9, %rax # k, k
853 # crypt_util.c:575: *k++ ^= (x << 32) | x;
854 movq %rdx, -8(%rax) # tmp215, MEM[base: k_88, offset: -8B]
855 # crypt_util.c:573: for(j=4096; j--;) {
856 jne .L80 #,
857 # crypt_util.c:657: shuffle_sb((LONGG)__data->sb2, __data->current_saltbits ^ saltbits);
858 movq 131216(%rsi), %r10 # *__data_46(D).current_saltbits, _29
859 leaq 98432(%rsi), %r9 #, k
860 xorq %rcx, %r10 # saltbits, _29
861 .p2align 4,,10
862 .p2align 3
863 .L81:
864 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
865 movq (%rax), %r8 # MEM[base: k_118, offset: 0B], _73
866 # crypt_util.c:575: *k++ ^= (x << 32) | x;
867 addq $8, %rax #, k
868 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
869 movq %r8, %rdx # _73, tmp216
870 shrq $32, %rdx #, tmp216
871 xorq %r8, %rdx # _73, tmp217
872 andq %r10, %rdx # _29, x
873 # crypt_util.c:575: *k++ ^= (x << 32) | x;
874 movq %rdx, %rdi # x, tmp218
875 salq $32, %rdi #, tmp218
876 orq %rdi, %rdx # tmp218, tmp219
877 xorq %r8, %rdx # _73, tmp220
878 # crypt_util.c:573: for(j=4096; j--;) {
879 cmpq %r9, %rax # k, k
880 # crypt_util.c:575: *k++ ^= (x << 32) | x;
881 movq %rdx, -8(%rax) # tmp220, MEM[base: k_77, offset: -8B]
882 # crypt_util.c:573: for(j=4096; j--;) {
883 jne .L81 #,
884 # crypt_util.c:658: shuffle_sb((LONGG)__data->sb3, __data->current_saltbits ^ saltbits);
885 movq 131216(%rsi), %r10 # *__data_46(D).current_saltbits, _33
886 leaq 131200(%rsi), %r9 #, _159
887 xorq %rcx, %r10 # saltbits, _33
888 .p2align 4,,10
889 .p2align 3
890 .L82:
891 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
892 movq (%rax), %r8 # MEM[base: k_135, offset: 0B], _62
893 # crypt_util.c:575: *k++ ^= (x << 32) | x;
894 addq $8, %rax #, k
895 # crypt_util.c:574: x = ((*k >> 32) ^ *k) & (long64)saltbits;
896 movq %r8, %rdx # _62, tmp221
897 shrq $32, %rdx #, tmp221
898 xorq %r8, %rdx # _62, tmp222
899 andq %r10, %rdx # _33, x
900 # crypt_util.c:575: *k++ ^= (x << 32) | x;
901 movq %rdx, %rdi # x, tmp223
902 salq $32, %rdi #, tmp223
903 orq %rdi, %rdx # tmp223, tmp224
904 xorq %r8, %rdx # _62, tmp225
905 # crypt_util.c:573: for(j=4096; j--;) {
906 cmpq %r9, %rax # _159, k
907 # crypt_util.c:575: *k++ ^= (x << 32) | x;
908 movq %rdx, -8(%rax) # tmp225, MEM[base: k_66, offset: -8B]
909 # crypt_util.c:573: for(j=4096; j--;) {
910 jne .L82 #,
911 # crypt_util.c:660: __data->current_saltbits = saltbits;
912 movq %rcx, 131216(%rsi) # saltbits, *__data_46(D).current_saltbits
913 # crypt_util.c:663: }
914 addq $16, %rsp #,
915 .cfi_remember_state
916 .cfi_def_cfa_offset 16
917 # crypt_util.c:662: return true;
918 movl $1, %eax #, <retval>
919 # crypt_util.c:663: }
920 popq %rbx #
921 .cfi_def_cfa_offset 8
923 .L66:
924 .cfi_restore_state
925 # crypt_util.c:587: switch (c)
926 leal -97(%rdi), %eax #, tmp188
927 cmpb $25, %al #, tmp188
928 ja .L84 #,
929 jmp .L67 #
930 .p2align 4,,10
931 .p2align 3
932 .L99:
933 # crypt_util.c:613: __init_des_r(__data);
934 movq %rsi, %rdi # __data,
935 movq %rsi, 8(%rsp) # __data, %sfp
936 call __init_des_r #
937 movq 8(%rsp), %rsi # %sfp, __data
938 jmp .L62 #
939 .L69:
940 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
941 subl $59, %eax #, tmp191
942 movslq %eax, %rdx # tmp191, iftmp.7_146
943 jmp .L71 #
944 .L100:
945 # crypt_util.c:623: if(s0 == __data->current_salt[0] && s1 == __data->current_salt[1])
946 cmpb %dil, 131215(%rsi) # pretmp_108, *__data_46(D).current_salt
947 # crypt_util.c:624: return true;
948 movl $1, %eax #, <retval>
949 # crypt_util.c:623: if(s0 == __data->current_salt[0] && s1 == __data->current_salt[1])
950 jne .L68 #,
951 jmp .L61 #
952 .L74:
953 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
954 cmpb $64, %dil #, pretmp_108
955 jle .L76 #,
956 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
957 subl $53, %eax #, tmp198
958 movslq %eax, %rdx # tmp198, iftmp.7_42
959 jmp .L75 #
960 .L70:
961 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
962 subl $53, %eax #, tmp190
963 movslq %eax, %rdx # tmp190, iftmp.7_146
964 jmp .L71 #
965 .L76:
966 # crypt_util.c:636: long c=ascii_to_bin(s[i]);
967 subl $46, %eax #, tmp199
968 movslq %eax, %rdx # tmp199, iftmp.7_42
969 jmp .L75 #
970 .cfi_endproc
971 .LFE51:
972 .size _ufc_setup_salt_r, .-_ufc_setup_salt_r
973 .p2align 4,,15
974 .globl _ufc_mk_keytab_r
975 .type _ufc_mk_keytab_r, @function
976 _ufc_mk_keytab_r:
977 .LFB52:
978 .cfi_startproc
979 pushq %r15 #
980 .cfi_def_cfa_offset 16
981 .cfi_offset 15, -16
982 leaq 8(%rdi), %r9 #, _311
983 pushq %r14 #
984 .cfi_def_cfa_offset 24
985 .cfi_offset 14, -24
986 # crypt_util.c:679: v1 = v2 = 0; k1 = &do_pc1[0][0][0];
987 leaq do_pc1(%rip), %rcx #, k1
988 # crypt_util.c:667: {
989 pushq %r13 #
990 .cfi_def_cfa_offset 32
991 .cfi_offset 13, -32
992 pushq %r12 #
993 .cfi_def_cfa_offset 40
994 .cfi_offset 12, -40
995 pushq %rbp #
996 .cfi_def_cfa_offset 48
997 .cfi_offset 6, -48
998 pushq %rbx #
999 .cfi_def_cfa_offset 56
1000 .cfi_offset 3, -56
1001 # crypt_util.c:679: v1 = v2 = 0; k1 = &do_pc1[0][0][0];
1002 xorl %r8d, %r8d # v2
1003 xorl %edx, %edx # v1
1004 .p2align 4,,10
1005 .p2align 3
1006 .L102:
1007 # crypt_util.c:681: v1 |= k1[*key & 0x7f]; k1 += 128;
1008 movzbl (%rdi), %eax # MEM[base: key_96, offset: 0B], MEM[base: key_96, offset: 0B]
1009 # crypt_util.c:682: v2 |= k1[*key++ & 0x7f]; k1 += 128;
1010 addq $1, %rdi #, key
1011 # crypt_util.c:681: v1 |= k1[*key & 0x7f]; k1 += 128;
1012 andl $127, %eax #, tmp200
1013 orq (%rcx,%rax,8), %rdx # *_4, v1
1014 # crypt_util.c:682: v2 |= k1[*key++ & 0x7f]; k1 += 128;
1015 orq 1024(%rcx,%rax,8), %r8 # *_6, v2
1016 addq $2048, %rcx #, k1
1017 # crypt_util.c:680: for(i = 8; i--;) {
1018 cmpq %r9, %rdi # _311, key
1019 jne .L102 #,
1020 leaq 1024+do_pc2(%rip), %r15 #, tmp234
1021 xorl %edi, %edi # ivtmp.241
1022 movl $1, %r9d #, pretmp_321
1023 leaq -1024(%r15), %r14 #, tmp241
1024 leaq 2048(%r14), %r12 #, tmp235
1025 leaq 3072(%r14), %r13 #, tmp236
1026 leaq 2048(%r12), %rbp #, tmp237
1027 leaq 3072(%rbp), %rbx #, tmp240
1028 jmp .L104 #
1029 .p2align 4,,10
1030 .p2align 3
1031 .L107:
1032 leaq rots(%rip), %rax #, tmp271
1033 movl (%rax,%rdi), %r9d # MEM[symbol: rots, index: ivtmp.241_312, offset: 0B], pretmp_321
1034 .L104:
1035 # crypt_util.c:688: v1 = (v1 << rots[i]) | (v1 >> (28 - rots[i]));
1036 movl $28, %eax #, _9
1037 movl %r9d, %ecx # pretmp_321, tmp255
1038 movq %rdx, %r10 # v1, _8
1039 subl %r9d, %eax # pretmp_321, _9
1040 salq %cl, %r10 # tmp255, _8
1041 # crypt_util.c:702: v2 = (v2 << rots[i]) | (v2 >> (28 - rots[i]));
1042 movq %r8, %r11 # v2, _29
1043 # crypt_util.c:688: v1 = (v1 << rots[i]) | (v1 >> (28 - rots[i]));
1044 movl %eax, %ecx # _9, tmp257
1045 shrq %cl, %rdx # tmp257, _10
1046 orq %r10, %rdx # _8, v1
1047 # crypt_util.c:690: v |= k1[(v1 >> 14) & 0x7f]; k1 += 128;
1048 movq %rdx, %r10 # v1, tmp202
1049 # crypt_util.c:689: v = k1[(v1 >> 21) & 0x7f]; k1 += 128;
1050 movq %rdx, %rcx # v1, tmp205
1051 # crypt_util.c:690: v |= k1[(v1 >> 14) & 0x7f]; k1 += 128;
1052 shrq $14, %r10 #, tmp202
1053 # crypt_util.c:689: v = k1[(v1 >> 21) & 0x7f]; k1 += 128;
1054 shrq $21, %rcx #, tmp205
1055 andl $127, %ecx #, tmp206
1056 # crypt_util.c:690: v |= k1[(v1 >> 14) & 0x7f]; k1 += 128;
1057 andl $127, %r10d #, tmp203
1058 movq (%r15,%r10,8), %r10 # *_18, *_18
1059 orq (%r14,%rcx,8), %r10 # *_14, tmp208
1060 # crypt_util.c:692: v |= k1[(v1 ) & 0x7f]; k1 += 128;
1061 movq %rdx, %rcx # v1, tmp210
1062 andl $127, %ecx #, tmp210
1063 orq 0(%r13,%rcx,8), %r10 # *_27, _52
1064 # crypt_util.c:691: v |= k1[(v1 >> 7) & 0x7f]; k1 += 128;
1065 movq %rdx, %rcx # v1, tmp212
1066 shrq $7, %rcx #, tmp212
1067 andl $127, %ecx #, tmp213
1068 # crypt_util.c:692: v |= k1[(v1 ) & 0x7f]; k1 += 128;
1069 orq (%r12,%rcx,8), %r10 # *_23, v
1070 # crypt_util.c:702: v2 = (v2 << rots[i]) | (v2 >> (28 - rots[i]));
1071 movl %r9d, %ecx # pretmp_321, tmp263
1072 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1073 leaq 5120+do_pc2(%rip), %r9 #, tmp272
1074 # crypt_util.c:702: v2 = (v2 << rots[i]) | (v2 >> (28 - rots[i]));
1075 salq %cl, %r11 # tmp263, _29
1076 movl %eax, %ecx # _9, tmp264
1077 shrq %cl, %r8 # tmp264, _30
1078 orq %r11, %r8 # _29, v2
1079 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1080 movabsq $140737488388096, %r11 #, tmp220
1081 # crypt_util.c:703: v |= k1[(v2 >> 21) & 0x7f]; k1 += 128;
1082 movq %r8, %rcx # v2, tmp215
1083 # crypt_util.c:706: v |= k1[(v2 ) & 0x7f];
1084 movq %r8, %rax # v2, tmp218
1085 # crypt_util.c:699: v = (v << 32);
1086 salq $32, %r10 #, v
1087 # crypt_util.c:706: v |= k1[(v2 ) & 0x7f];
1088 andl $127, %eax #, tmp218
1089 # crypt_util.c:703: v |= k1[(v2 >> 21) & 0x7f]; k1 += 128;
1090 shrq $21, %rcx #, tmp215
1091 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1092 orq (%rbx,%rax,8), %r11 # *_48, tmp220
1093 # crypt_util.c:703: v |= k1[(v2 >> 21) & 0x7f]; k1 += 128;
1094 andl $127, %ecx #, tmp216
1095 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1096 movq 0(%rbp,%rcx,8), %rax # *_34, tmp220
1097 # crypt_util.c:704: v |= k1[(v2 >> 14) & 0x7f]; k1 += 128;
1098 movq %r8, %rcx # v2, tmp223
1099 shrq $14, %rcx #, tmp223
1100 andl $127, %ecx #, tmp224
1101 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1102 orq %r11, %rax # tmp220, tmp220
1103 orq (%r9,%rcx,8), %rax # *_39, tmp226
1104 # crypt_util.c:705: v |= k1[(v2 >> 7) & 0x7f]; k1 += 128;
1105 movq %r8, %rcx # v2, tmp227
1106 shrq $7, %rcx #, tmp227
1107 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1108 leaq 1024(%r9), %r11 #, tmp270
1109 # crypt_util.c:705: v |= k1[(v2 >> 7) & 0x7f]; k1 += 128;
1110 andl $127, %ecx #, tmp228
1111 # crypt_util.c:712: *k2++ = v | 0x0000800000008000l;
1112 orq (%r11,%rcx,8), %rax # *_44, tmp230
1113 orq %r10, %rax # v, tmp232
1114 movq %rax, (%rsi,%rdi,2) # tmp232, MEM[base: k2_62, index: ivtmp.241_313, step: 2, offset: 0B]
1115 addq $4, %rdi #, ivtmp.241
1116 # crypt_util.c:685: for(i = 0; i < 16; i++) {
1117 cmpq $64, %rdi #, ivtmp.241
1118 jne .L107 #,
1119 # crypt_util.c:717: }
1120 popq %rbx #
1121 .cfi_def_cfa_offset 48
1122 # crypt_util.c:716: __data->direction = 0;
1123 movl $0, 131224(%rsi) #, *__data_61(D).direction
1124 # crypt_util.c:717: }
1125 popq %rbp #
1126 .cfi_def_cfa_offset 40
1127 popq %r12 #
1128 .cfi_def_cfa_offset 32
1129 popq %r13 #
1130 .cfi_def_cfa_offset 24
1131 popq %r14 #
1132 .cfi_def_cfa_offset 16
1133 popq %r15 #
1134 .cfi_def_cfa_offset 8
1136 .cfi_endproc
1137 .LFE52:
1138 .size _ufc_mk_keytab_r, .-_ufc_mk_keytab_r
1139 .p2align 4,,15
1140 .globl _ufc_dofinalperm_r
1141 .type _ufc_dofinalperm_r, @function
1142 _ufc_dofinalperm_r:
1143 .LFB53:
1144 .cfi_startproc
1145 # crypt_util.c:729: l1 = res[0]; l2 = res[1];
1146 movq (%rdi), %rcx # *res_42(D), l1
1147 movq 8(%rdi), %r10 # MEM[(ufc_long *)res_42(D) + 8B], l2
1148 # crypt_util.c:737: v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1];
1149 leaq efp(%rip), %rax #, tmp201
1150 # crypt_util.c:725: {
1151 pushq %rbx #
1152 .cfi_def_cfa_offset 16
1153 .cfi_offset 3, -16
1154 # crypt_util.c:732: x = (l1 ^ l2) & __data->current_saltbits; l1 ^= x; l2 ^= x;
1155 movq 131216(%rsi), %rsi # *__data_47(D).current_saltbits, _3
1156 # crypt_util.c:730: r1 = res[2]; r2 = res[3];
1157 movq 16(%rdi), %r9 # MEM[(ufc_long *)res_42(D) + 16B], r1
1158 movq 24(%rdi), %rdx # MEM[(ufc_long *)res_42(D) + 24B], r2
1159 # crypt_util.c:732: x = (l1 ^ l2) & __data->current_saltbits; l1 ^= x; l2 ^= x;
1160 movq %rcx, %r8 # l1, tmp197
1161 xorq %r10, %r8 # l2, tmp197
1162 andq %rsi, %r8 # _3, x
1163 xorq %r8, %rcx # x, l1
1164 xorq %r10, %r8 # l2, l2
1165 # crypt_util.c:733: x = (r1 ^ r2) & __data->current_saltbits; r1 ^= x; r2 ^= x;
1166 movq %r9, %r10 # r1, tmp198
1167 xorq %rdx, %r10 # r2, tmp198
1168 andq %rsi, %r10 # _3, x
1169 xorq %r10, %r9 # x, r1
1170 xorq %rdx, %r10 # r2, r2
1171 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1172 movq %r10, %r11 # r2, r2
1173 # crypt_util.c:738: v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
1174 movq %r10, %rdx # r2, r2
1175 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1176 shrq $3, %r11 #, r2
1177 # crypt_util.c:738: v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
1178 shrq $9, %rdx #, r2
1179 # crypt_util.c:737: v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1];
1180 andl $63, %r11d #, _5
1181 # crypt_util.c:738: v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
1182 andl $63, %edx #, _8
1183 # crypt_util.c:737: v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1];
1184 leaq 960(%r11), %rsi #, tmp202
1185 # crypt_util.c:738: v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
1186 leaq 896(%rdx), %rbx #, tmp206
1187 salq $4, %rdx #, _8
1188 salq $4, %rbx #, tmp207
1189 # crypt_util.c:737: v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1];
1190 salq $4, %rsi #, tmp203
1191 # crypt_util.c:738: v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
1192 movq (%rax,%rsi), %rsi # efp, tmp209
1193 orq (%rax,%rbx), %rsi # efp, v1
1194 # crypt_util.c:737: v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1];
1195 movq %r11, %rbx # _5, _5
1196 salq $4, %rbx #, _5
1197 # crypt_util.c:738: v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
1198 movq %rdx, %r11 # _8, tmp215
1199 movq 15368(%rax,%rbx), %rdx # efp, tmp218
1200 orq 14344(%rax,%r11), %rdx # efp, v2
1201 # crypt_util.c:739: v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1];
1202 movq %r10, %r11 # r2, r2
1203 shrq $19, %r11 #, r2
1204 # crypt_util.c:740: v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1];
1205 shrq $25, %r10 #, r2
1206 # crypt_util.c:739: v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1];
1207 andl $63, %r11d #, _10
1208 # crypt_util.c:740: v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1];
1209 andl $63, %r10d #, _12
1210 # crypt_util.c:739: v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1];
1211 leaq 832(%r11), %rbx #, tmp221
1212 salq $4, %r11 #, tmp225
1213 orq 13320(%rax,%r11), %rdx # efp, v2
1214 # crypt_util.c:740: v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1];
1215 leaq 768(%r10), %r11 #, tmp230
1216 salq $4, %r10 #, tmp234
1217 orq 12296(%rax,%r10), %rdx # efp, v2
1218 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1219 movq %r9, %r10 # r1, r1
1220 # crypt_util.c:739: v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1];
1221 salq $4, %rbx #, tmp222
1222 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1223 shrq $3, %r10 #, r1
1224 # crypt_util.c:739: v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1];
1225 orq (%rax,%rbx), %rsi # efp, v1
1226 # crypt_util.c:740: v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1];
1227 salq $4, %r11 #, tmp231
1228 # crypt_util.c:742: v1 |= efp[11][ r1 & 0x3f][0]; v2 |= efp[11][ r1 & 0x3f][1];
1229 andl $63, %r10d #, _14
1230 # crypt_util.c:740: v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1];
1231 orq (%rax,%r11), %rsi # efp, v1
1232 # crypt_util.c:742: v1 |= efp[11][ r1 & 0x3f][0]; v2 |= efp[11][ r1 & 0x3f][1];
1233 leaq 704(%r10), %r11 #, tmp239
1234 salq $4, %r11 #, tmp240
1235 salq $4, %r10 #, tmp243
1236 orq 11272(%rax,%r10), %rdx # efp, v2
1237 # crypt_util.c:743: v1 |= efp[10][(r1 >>= 6) & 0x3f][0]; v2 |= efp[10][ r1 & 0x3f][1];
1238 movq %r9, %r10 # r1, r1
1239 # crypt_util.c:742: v1 |= efp[11][ r1 & 0x3f][0]; v2 |= efp[11][ r1 & 0x3f][1];
1240 orq (%rax,%r11), %rsi # efp, v1
1241 # crypt_util.c:743: v1 |= efp[10][(r1 >>= 6) & 0x3f][0]; v2 |= efp[10][ r1 & 0x3f][1];
1242 shrq $9, %r10 #, r1
1243 andl $63, %r10d #, _17
1244 leaq 640(%r10), %r11 #, tmp248
1245 salq $4, %r10 #, tmp252
1246 orq 10248(%rax,%r10), %rdx # efp, v2
1247 # crypt_util.c:744: v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
1248 movq %r9, %r10 # r1, r1
1249 # crypt_util.c:745: v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1];
1250 shrq $25, %r9 #, r1
1251 # crypt_util.c:744: v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
1252 shrq $19, %r10 #, r1
1253 # crypt_util.c:743: v1 |= efp[10][(r1 >>= 6) & 0x3f][0]; v2 |= efp[10][ r1 & 0x3f][1];
1254 salq $4, %r11 #, tmp249
1255 # crypt_util.c:745: v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1];
1256 andl $63, %r9d #, _21
1257 # crypt_util.c:744: v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
1258 andl $63, %r10d #, _19
1259 # crypt_util.c:743: v1 |= efp[10][(r1 >>= 6) & 0x3f][0]; v2 |= efp[10][ r1 & 0x3f][1];
1260 orq (%rax,%r11), %rsi # efp, v1
1261 # crypt_util.c:744: v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
1262 leaq 576(%r10), %r11 #, tmp257
1263 salq $4, %r10 #, tmp261
1264 orq 9224(%rax,%r10), %rdx # efp, v2
1265 # crypt_util.c:745: v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1];
1266 leaq 512(%r9), %r10 #, tmp266
1267 salq $4, %r9 #, tmp270
1268 orq 8200(%rax,%r9), %rdx # efp, v2
1269 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1270 movq %r8, %r9 # l2, l2
1271 # crypt_util.c:744: v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
1272 salq $4, %r11 #, tmp258
1273 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1274 shrq $3, %r9 #, l2
1275 # crypt_util.c:744: v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
1276 orq (%rax,%r11), %rsi # efp, v1
1277 # crypt_util.c:745: v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1];
1278 salq $4, %r10 #, tmp267
1279 # crypt_util.c:747: v1 |= efp[ 7][ l2 & 0x3f][0]; v2 |= efp[ 7][ l2 & 0x3f][1];
1280 andl $63, %r9d #, _23
1281 # crypt_util.c:745: v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1];
1282 orq (%rax,%r10), %rsi # efp, v1
1283 # crypt_util.c:747: v1 |= efp[ 7][ l2 & 0x3f][0]; v2 |= efp[ 7][ l2 & 0x3f][1];
1284 leaq 448(%r9), %r10 #, tmp275
1285 salq $4, %r9 #, tmp279
1286 orq 7176(%rax,%r9), %rdx # efp, v2
1287 # crypt_util.c:748: v1 |= efp[ 6][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 6][ l2 & 0x3f][1];
1288 movq %r8, %r9 # l2, l2
1289 shrq $9, %r9 #, l2
1290 # crypt_util.c:747: v1 |= efp[ 7][ l2 & 0x3f][0]; v2 |= efp[ 7][ l2 & 0x3f][1];
1291 salq $4, %r10 #, tmp276
1292 # crypt_util.c:748: v1 |= efp[ 6][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 6][ l2 & 0x3f][1];
1293 andl $63, %r9d #, _26
1294 # crypt_util.c:747: v1 |= efp[ 7][ l2 & 0x3f][0]; v2 |= efp[ 7][ l2 & 0x3f][1];
1295 orq (%rax,%r10), %rsi # efp, v1
1296 # crypt_util.c:748: v1 |= efp[ 6][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 6][ l2 & 0x3f][1];
1297 leaq 384(%r9), %r10 #, tmp284
1298 salq $4, %r10 #, tmp285
1299 orq (%rax,%r10), %rsi # efp, v1
1300 salq $4, %r9 #, tmp288
1301 orq 6152(%rax,%r9), %rdx # efp, v2
1302 # crypt_util.c:749: v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1];
1303 movq %r8, %r9 # l2, l2
1304 # crypt_util.c:750: v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1];
1305 shrq $25, %r8 #, l2
1306 # crypt_util.c:749: v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1];
1307 shrq $19, %r9 #, l2
1308 # crypt_util.c:750: v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1];
1309 andl $63, %r8d #, _30
1310 # crypt_util.c:749: v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1];
1311 andl $63, %r9d #, _28
1312 leaq 320(%r9), %r10 #, tmp293
1313 salq $4, %r9 #, tmp297
1314 orq 5128(%rax,%r9), %rdx # efp, v2
1315 # crypt_util.c:750: v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1];
1316 leaq 256(%r8), %r9 #, tmp302
1317 salq $4, %r8 #, tmp306
1318 orq 4104(%rax,%r8), %rdx # efp, v2
1319 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1320 movq %rcx, %r8 # l1, l1
1321 # crypt_util.c:749: v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1];
1322 salq $4, %r10 #, tmp294
1323 # crypt_util.c:735: v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
1324 shrq $3, %r8 #, l1
1325 # crypt_util.c:749: v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1];
1326 orq (%rax,%r10), %rsi # efp, v1
1327 # crypt_util.c:750: v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1];
1328 salq $4, %r9 #, tmp303
1329 # crypt_util.c:752: v1 |= efp[ 3][ l1 & 0x3f][0]; v2 |= efp[ 3][ l1 & 0x3f][1];
1330 andl $63, %r8d #, _32
1331 # crypt_util.c:750: v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1];
1332 orq (%rax,%r9), %rsi # efp, v1
1333 # crypt_util.c:752: v1 |= efp[ 3][ l1 & 0x3f][0]; v2 |= efp[ 3][ l1 & 0x3f][1];
1334 leaq 192(%r8), %r9 #, tmp311
1335 salq $4, %r8 #, tmp315
1336 orq 3080(%rax,%r8), %rdx # efp, v2
1337 # crypt_util.c:753: v1 |= efp[ 2][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 2][ l1 & 0x3f][1];
1338 movq %rcx, %r8 # l1, l1
1339 shrq $9, %r8 #, l1
1340 # crypt_util.c:752: v1 |= efp[ 3][ l1 & 0x3f][0]; v2 |= efp[ 3][ l1 & 0x3f][1];
1341 salq $4, %r9 #, tmp312
1342 # crypt_util.c:753: v1 |= efp[ 2][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 2][ l1 & 0x3f][1];
1343 andl $63, %r8d #, _35
1344 # crypt_util.c:752: v1 |= efp[ 3][ l1 & 0x3f][0]; v2 |= efp[ 3][ l1 & 0x3f][1];
1345 orq (%rax,%r9), %rsi # efp, v1
1346 # crypt_util.c:753: v1 |= efp[ 2][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 2][ l1 & 0x3f][1];
1347 leaq 128(%r8), %r9 #, tmp320
1348 salq $4, %r8 #, tmp324
1349 salq $4, %r9 #, tmp321
1350 orq (%rax,%r9), %rsi # efp, v1
1351 orq 2056(%rax,%r8), %rdx # efp, v2
1352 # crypt_util.c:754: v1 |= efp[ 1][(l1 >>= 10) & 0x3f][0]; v2 |= efp[ 1][ l1 & 0x3f][1];
1353 movq %rcx, %r8 # l1, l1
1354 shrq $19, %r8 #, l1
1355 andl $63, %r8d #, _37
1356 leaq 64(%r8), %r9 #, tmp329
1357 salq $4, %r8 #, tmp333
1358 orq 1032(%rax,%r8), %rdx # efp, v2
1359 salq $4, %r9 #, tmp330
1360 orq (%rax,%r9), %rsi # efp, v1
1361 # crypt_util.c:755: v1 |= efp[ 0][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 0][ l1 & 0x3f][1];
1362 shrq $21, %rcx #, _39
1363 andl $1008, %ecx #, tmp338
1364 addq %rcx, %rax # tmp338, tmp339
1365 orq 8(%rax), %rdx # efp, v2
1366 orq (%rax), %rsi # efp, v1
1367 # crypt_util.c:758: }
1368 popq %rbx #
1369 .cfi_def_cfa_offset 8
1370 # crypt_util.c:757: res[0] = v1; res[1] = v2;
1371 movq %rsi, (%rdi) # v1, *res_42(D)
1372 movq %rdx, 8(%rdi) # v2, MEM[(ufc_long *)res_42(D) + 8B]
1373 # crypt_util.c:758: }
1375 .cfi_endproc
1376 .LFE53:
1377 .size _ufc_dofinalperm_r, .-_ufc_dofinalperm_r
1378 .p2align 4,,15
1379 .globl _ufc_output_conversion_r
1380 .type _ufc_output_conversion_r, @function
1381 _ufc_output_conversion_r:
1382 .LFB54:
1383 .cfi_startproc
1384 # crypt_util.c:771: __data->crypt_3_buf[0] = salt[0];
1385 movzbl (%rdx), %eax # *salt_50(D), _1
1386 # crypt_util.c:772: __data->crypt_3_buf[1] = salt[1] ? salt[1] : salt[0];
1387 movzbl 1(%rdx), %edx # MEM[(const char *)salt_50(D) + 1B], _2
1388 # crypt_util.c:768: {
1389 movq %rcx, %r8 # __data, __data
1390 # crypt_util.c:772: __data->crypt_3_buf[1] = salt[1] ? salt[1] : salt[0];
1391 testb %dl, %dl # _2
1392 # crypt_util.c:771: __data->crypt_3_buf[0] = salt[0];
1393 movb %al, 131200(%rcx) # _1, *__data_51(D).crypt_3_buf
1394 # crypt_util.c:772: __data->crypt_3_buf[1] = salt[1] ? salt[1] : salt[0];
1395 cmovne %edx, %eax # _1,, _2, _1
1396 leaq 131202(%rcx), %rdx #, ivtmp.277
1397 movb %al, 131201(%rcx) # _1, *__data_51(D).crypt_3_buf
1398 movl $26, %ecx #, ivtmp.275
1399 .L115:
1400 # crypt_util.c:776: __data->crypt_3_buf[i + 2] = bin_to_ascii((v1 >> shf) & 0x3f);
1401 movq %rdi, %rax # v1, _4
1402 shrq %cl, %rax # ivtmp.275, _4
1403 andl $63, %eax #, _5
1404 cmpq $37, %rax #, _5
1405 jbe .L112 #,
1406 # crypt_util.c:776: __data->crypt_3_buf[i + 2] = bin_to_ascii((v1 >> shf) & 0x3f);
1407 addl $59, %eax #, iftmp.17_44
1408 .L113:
1409 subl $6, %ecx #, ivtmp.275
1410 # crypt_util.c:776: __data->crypt_3_buf[i + 2] = bin_to_ascii((v1 >> shf) & 0x3f);
1411 movb %al, (%rdx) # iftmp.17_44, MEM[base: _80, offset: 0B]
1412 addq $1, %rdx #, ivtmp.277
1413 # crypt_util.c:774: for(i = 0; i < 5; i++) {
1414 cmpl $-4, %ecx #, ivtmp.275
1415 jne .L115 #,
1416 # crypt_util.c:779: s = (v2 & 0xf) << 2;
1417 leal 0(,%rsi,4), %r9d #, tmp208
1418 # crypt_util.c:780: v2 = (v2 >> 2) | ((v1 & 0x3) << 30);
1419 movq %rsi, %rax # v2, v2
1420 salq $30, %rdi #, tmp209
1421 shrq $2, %rax #, v2
1422 movl %edi, %esi # tmp209, tmp210
1423 leaq 131207(%r8), %rdx #, ivtmp.267
1424 # crypt_util.c:779: s = (v2 & 0xf) << 2;
1425 andl $60, %r9d #, s
1426 # crypt_util.c:780: v2 = (v2 >> 2) | ((v1 & 0x3) << 30);
1427 orq %rax, %rsi # _19, v2
1428 movl $26, %ecx #, ivtmp.265
1429 .L119:
1430 # crypt_util.c:784: __data->crypt_3_buf[i + 2] = bin_to_ascii((v2 >> shf) & 0x3f);
1431 movq %rsi, %rax # v2, _23
1432 shrq %cl, %rax # ivtmp.265, _23
1433 andl $63, %eax #, _24
1434 cmpq $37, %rax #, _24
1435 jbe .L116 #,
1436 # crypt_util.c:784: __data->crypt_3_buf[i + 2] = bin_to_ascii((v2 >> shf) & 0x3f);
1437 addl $59, %eax #, iftmp.19_45
1438 .L117:
1439 subl $6, %ecx #, ivtmp.265
1440 # crypt_util.c:784: __data->crypt_3_buf[i + 2] = bin_to_ascii((v2 >> shf) & 0x3f);
1441 movb %al, (%rdx) # iftmp.19_45, MEM[base: _42, offset: 0B]
1442 addq $1, %rdx #, ivtmp.267
1443 # crypt_util.c:782: for(i = 5; i < 10; i++) {
1444 cmpl $-4, %ecx #, ivtmp.265
1445 jne .L119 #,
1446 # crypt_util.c:787: __data->crypt_3_buf[12] = bin_to_ascii(s);
1447 cmpl $37, %r9d #, s
1448 jg .L127 #,
1449 # crypt_util.c:787: __data->crypt_3_buf[12] = bin_to_ascii(s);
1450 leal 53(%r9), %edx #, tmp216
1451 leal 46(%r9), %eax #, tmp215
1452 cmpl $12, %r9d #, s
1453 # crypt_util.c:788: __data->crypt_3_buf[13] = 0;
1454 movb $0, 131213(%r8) #, *__data_51(D).crypt_3_buf
1455 # crypt_util.c:787: __data->crypt_3_buf[12] = bin_to_ascii(s);
1456 cmovl %eax, %edx # tmp215,, tmp216
1457 movl %edx, %r9d # tmp216, iftmp.21_46
1458 movb %r9b, 131212(%r8) # iftmp.21_46, *__data_51(D).crypt_3_buf
1459 # crypt_util.c:789: }
1461 .p2align 4,,10
1462 .p2align 3
1463 .L112:
1464 # crypt_util.c:776: __data->crypt_3_buf[i + 2] = bin_to_ascii((v1 >> shf) & 0x3f);
1465 leal 53(%rax), %r10d #, tmp212
1466 leal 46(%rax), %r9d #, tmp211
1467 cmpq $12, %rax #, _5
1468 movl %r10d, %eax # tmp212, tmp212
1469 cmovb %r9d, %eax # tmp211,, tmp212
1470 jmp .L113 #
1471 .p2align 4,,10
1472 .p2align 3
1473 .L116:
1474 # crypt_util.c:784: __data->crypt_3_buf[i + 2] = bin_to_ascii((v2 >> shf) & 0x3f);
1475 leal 53(%rax), %r10d #, tmp214
1476 leal 46(%rax), %edi #, tmp213
1477 cmpq $12, %rax #, _24
1478 movl %r10d, %eax # tmp214, tmp214
1479 cmovb %edi, %eax # tmp213,, tmp214
1480 jmp .L117 #
1481 .p2align 4,,10
1482 .p2align 3
1483 .L127:
1484 # crypt_util.c:787: __data->crypt_3_buf[12] = bin_to_ascii(s);
1485 addl $59, %r9d #, iftmp.21_46
1486 # crypt_util.c:788: __data->crypt_3_buf[13] = 0;
1487 movb $0, 131213(%r8) #, *__data_51(D).crypt_3_buf
1488 # crypt_util.c:787: __data->crypt_3_buf[12] = bin_to_ascii(s);
1489 movb %r9b, 131212(%r8) # iftmp.21_46, *__data_51(D).crypt_3_buf
1490 # crypt_util.c:789: }
1492 .cfi_endproc
1493 .LFE54:
1494 .size _ufc_output_conversion_r, .-_ufc_output_conversion_r
	.p2align 4,,15
	.globl	__b64_from_24bit
	.type	__b64_from_24bit, @function
#-----------------------------------------------------------------------
# __b64_from_24bit(cp, buflen, b2, b1, b0, n)
# ABI:   SysV AMD64
# In:    rdi = cp     (char **; output cursor, advanced in place)
#        rsi = buflen (int *;  remaining space, decremented in place)
#        edx = b2, ecx = b1, r8d = b0 (three input bytes)
#        r9d = n      (max number of base64 chars to emit)
# Out:   none; emits min(n, *buflen) characters of b64t[w & 0x3f],
#        consuming w = (b2<<16)|(b1<<8)|b0 six bits at a time (LSB first).
# Clobb: rax, rcx, rdx, r8, r9, flags (no callee-saved regs touched).
#-----------------------------------------------------------------------
__b64_from_24bit:
.LFB55:
	.cfi_startproc
# crypt_util.c:939: unsigned int w = (b2 << 16) | (b1 << 8) | b0;
	sall	$16, %edx	#, b2
	sall	$8, %ecx	#, tmp107
	orl	%r8d, %ecx	# b0, tmp108
	movl	%edx, %r8d	# b2, tmp109
# crypt_util.c:940: while (n-- > 0 && (*buflen) > 0)
	leal	-1(%r9), %edx	#, n
# crypt_util.c:939: unsigned int w = (b2 << 16) | (b1 << 8) | b0;
	orl	%ecx, %r8d	# tmp108, w
# crypt_util.c:940: while (n-- > 0 && (*buflen) > 0)
	testl	%r9d, %r9d	# n	# n <= 0: nothing to emit
	jle	.L128	#,
	movl	(%rsi), %eax	# *buflen_21(D),
	testl	%eax, %eax	#	# *buflen <= 0: no room
	jle	.L128	#,
	leaq	b64t(%rip), %r9	#, tmp116	# base64 alphabet table
	jmp	.L130	#
	.p2align 4,,10
	.p2align 3
.L135:
# crypt_util.c:940: while (n-- > 0 && (*buflen) > 0)
	testl	%eax, %eax	# _9
	jle	.L128	#,
.L130:
# crypt_util.c:942: *(*cp)++ = b64t[w & 0x3f];
	movq	(%rdi), %rax	# *cp_22(D), _5
# crypt_util.c:940: while (n-- > 0 && (*buflen) > 0)
	subl	$1, %edx	#, n
# crypt_util.c:942: *(*cp)++ = b64t[w & 0x3f];
	leaq	1(%rax), %rcx	#, tmp110
	movq	%rcx, (%rdi)	# tmp110, *cp_22(D)	# advance *cp first
	movl	%r8d, %ecx	# w, tmp113
# crypt_util.c:944: w >>= 6;
	shrl	$6, %r8d	#, w
# crypt_util.c:942: *(*cp)++ = b64t[w & 0x3f];
	andl	$63, %ecx	#, tmp113
	movzbl	(%r9,%rcx), %ecx	# b64t, tmp114
	movb	%cl, (%rax)	# tmp114, *_5
# crypt_util.c:943: --(*buflen);
	movl	(%rsi), %eax	# *buflen_21(D), tmp119
	subl	$1, %eax	#, _9
# crypt_util.c:940: while (n-- > 0 && (*buflen) > 0)
	cmpl	$-1, %edx	#, n
# crypt_util.c:943: --(*buflen);
	movl	%eax, (%rsi)	# _9, *buflen_21(D)
# crypt_util.c:940: while (n-- > 0 && (*buflen) > 0)
	jne	.L135	#,
.L128:
# crypt_util.c:946: }
	rep ret		# two-byte return (branch-predictor friendly on older AMD)
	.cfi_endproc
.LFE55:
	.size	__b64_from_24bit, .-__b64_from_24bit
# File-local state for the one-time table setup and the shared fallback
# crypt_data block.
	.local	small_tables_initialized.7488	# function-static init flag (int)
	.comm	small_tables_initialized.7488,4,4
	.local	_ufc_tables_lock	# 40 bytes -- presumably a pthread lock; verify against crypt_util.c
	.comm	_ufc_tables_lock,40,32
	.comm	_ufc_foobar,131232,32	# global scratch block, 131232 bytes (crypt_data-sized -- TODO confirm)
	.section	.rodata
	.align 32
	.type	b64t, @object
	.size	b64t, 64
# 64-character crypt base64 alphabet ('.' '/' digits upper lower),
# indexed by 6-bit values in __b64_from_24bit above.
b64t:
	.ascii "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuv"
	.ascii "wxyz"
# Zero-initialized (BSS) lookup tables filled in at runtime by the table
# setup code. Sizes match their users in this file: efp is indexed as
# 16-byte [group][value][0/1] rows by _ufc_dofinalperm_r; do_pc1/do_pc2
# are read by _ufc_mk_keytab_r in 1024-byte (128-qword) strides.
	.local	efp
	.comm	efp,16384,32
	.local	eperm32tab
	.comm	eperm32tab,16384,32
	.local	do_pc2
	.comm	do_pc2,8192,32
	.local	do_pc1
	.comm	do_pc1,16384,32
	.align 32
	.type	longmask, @object
	.size	longmask, 256
# 32 single-bit qword masks, MSB-first: longmask[i] == 1 << (31 - i).
longmask:
	.quad	2147483648
	.quad	1073741824
	.quad	536870912
	.quad	268435456
	.quad	134217728
	.quad	67108864
	.quad	33554432
	.quad	16777216
	.quad	8388608
	.quad	4194304
	.quad	2097152
	.quad	1048576
	.quad	524288
	.quad	262144
	.quad	131072
	.quad	65536
	.quad	32768
	.quad	16384
	.quad	8192
	.quad	4096
	.quad	2048
	.quad	1024
	.quad	512
	.quad	256
	.quad	128
	.quad	64
	.quad	32
	.quad	16
	.quad	8
	.quad	4
	.quad	2
	.quad	1
1610 .section .rodata.cst8,"aM",@progbits,8
1611 .align 8
1612 .type bytemask, @object
1613 .size bytemask, 8
1614 bytemask:
1615 .byte -128
1616 .byte 64
1617 .byte 32
1618 .byte 16
1619 .byte 8
1620 .byte 4
1621 .byte 2
1622 .byte 1
1623 .section .rodata
1624 .align 32
1625 .type BITMASK, @object
1626 .size BITMASK, 192
1627 BITMASK:
1628 .quad 1073741824
1629 .quad 536870912
1630 .quad 268435456
1631 .quad 134217728
1632 .quad 67108864
1633 .quad 33554432
1634 .quad 16777216
1635 .quad 8388608
1636 .quad 4194304
1637 .quad 2097152
1638 .quad 1048576
1639 .quad 524288
1640 .quad 16384
1641 .quad 8192
1642 .quad 4096
1643 .quad 2048
1644 .quad 1024
1645 .quad 512
1646 .quad 256
1647 .quad 128
1648 .quad 64
1649 .quad 32
1650 .quad 16
1651 .quad 8
1652 .align 32
1653 .type final_perm, @object
1654 .size final_perm, 256
1655 final_perm:
1656 .long 40
1657 .long 8
1658 .long 48
1659 .long 16
1660 .long 56
1661 .long 24
1662 .long 64
1663 .long 32
1664 .long 39
1665 .long 7
1666 .long 47
1667 .long 15
1668 .long 55
1669 .long 23
1670 .long 63
1671 .long 31
1672 .long 38
1673 .long 6
1674 .long 46
1675 .long 14
1676 .long 54
1677 .long 22
1678 .long 62
1679 .long 30
1680 .long 37
1681 .long 5
1682 .long 45
1683 .long 13
1684 .long 53
1685 .long 21
1686 .long 61
1687 .long 29
1688 .long 36
1689 .long 4
1690 .long 44
1691 .long 12
1692 .long 52
1693 .long 20
1694 .long 60
1695 .long 28
1696 .long 35
1697 .long 3
1698 .long 43
1699 .long 11
1700 .long 51
1701 .long 19
1702 .long 59
1703 .long 27
1704 .long 34
1705 .long 2
1706 .long 42
1707 .long 10
1708 .long 50
1709 .long 18
1710 .long 58
1711 .long 26
1712 .long 33
1713 .long 1
1714 .long 41
1715 .long 9
1716 .long 49
1717 .long 17
1718 .long 57
1719 .long 25
1720 .align 32
1721 .type sbox, @object
1722 .size sbox, 2048
1723 sbox:
1724 .long 14
1725 .long 4
1726 .long 13
1727 .long 1
1728 .long 2
1729 .long 15
1730 .long 11
1731 .long 8
1732 .long 3
1733 .long 10
1734 .long 6
1735 .long 12
1736 .long 5
1737 .long 9
1738 .long 0
1739 .long 7
1740 .long 0
1741 .long 15
1742 .long 7
1743 .long 4
1744 .long 14
1745 .long 2
1746 .long 13
1747 .long 1
1748 .long 10
1749 .long 6
1750 .long 12
1751 .long 11
1752 .long 9
1753 .long 5
1754 .long 3
1755 .long 8
1756 .long 4
1757 .long 1
1758 .long 14
1759 .long 8
1760 .long 13
1761 .long 6
1762 .long 2
1763 .long 11
1764 .long 15
1765 .long 12
1766 .long 9
1767 .long 7
1768 .long 3
1769 .long 10
1770 .long 5
1771 .long 0
1772 .long 15
1773 .long 12
1774 .long 8
1775 .long 2
1776 .long 4
1777 .long 9
1778 .long 1
1779 .long 7
1780 .long 5
1781 .long 11
1782 .long 3
1783 .long 14
1784 .long 10
1785 .long 0
1786 .long 6
1787 .long 13
1788 .long 15
1789 .long 1
1790 .long 8
1791 .long 14
1792 .long 6
1793 .long 11
1794 .long 3
1795 .long 4
1796 .long 9
1797 .long 7
1798 .long 2
1799 .long 13
1800 .long 12
1801 .long 0
1802 .long 5
1803 .long 10
1804 .long 3
1805 .long 13
1806 .long 4
1807 .long 7
1808 .long 15
1809 .long 2
1810 .long 8
1811 .long 14
1812 .long 12
1813 .long 0
1814 .long 1
1815 .long 10
1816 .long 6
1817 .long 9
1818 .long 11
1819 .long 5
1820 .long 0
1821 .long 14
1822 .long 7
1823 .long 11
1824 .long 10
1825 .long 4
1826 .long 13
1827 .long 1
1828 .long 5
1829 .long 8
1830 .long 12
1831 .long 6
1832 .long 9
1833 .long 3
1834 .long 2
1835 .long 15
1836 .long 13
1837 .long 8
1838 .long 10
1839 .long 1
1840 .long 3
1841 .long 15
1842 .long 4
1843 .long 2
1844 .long 11
1845 .long 6
1846 .long 7
1847 .long 12
1848 .long 0
1849 .long 5
1850 .long 14
1851 .long 9
1852 .long 10
1853 .long 0
1854 .long 9
1855 .long 14
1856 .long 6
1857 .long 3
1858 .long 15
1859 .long 5
1860 .long 1
1861 .long 13
1862 .long 12
1863 .long 7
1864 .long 11
1865 .long 4
1866 .long 2
1867 .long 8
1868 .long 13
1869 .long 7
1870 .long 0
1871 .long 9
1872 .long 3
1873 .long 4
1874 .long 6
1875 .long 10
1876 .long 2
1877 .long 8
1878 .long 5
1879 .long 14
1880 .long 12
1881 .long 11
1882 .long 15
1883 .long 1
1884 .long 13
1885 .long 6
1886 .long 4
1887 .long 9
1888 .long 8
1889 .long 15
1890 .long 3
1891 .long 0
1892 .long 11
1893 .long 1
1894 .long 2
1895 .long 12
1896 .long 5
1897 .long 10
1898 .long 14
1899 .long 7
1900 .long 1
1901 .long 10
1902 .long 13
1903 .long 0
1904 .long 6
1905 .long 9
1906 .long 8
1907 .long 7
1908 .long 4
1909 .long 15
1910 .long 14
1911 .long 3
1912 .long 11
1913 .long 5
1914 .long 2
1915 .long 12
1916 .long 7
1917 .long 13
1918 .long 14
1919 .long 3
1920 .long 0
1921 .long 6
1922 .long 9
1923 .long 10
1924 .long 1
1925 .long 2
1926 .long 8
1927 .long 5
1928 .long 11
1929 .long 12
1930 .long 4
1931 .long 15
1932 .long 13
1933 .long 8
1934 .long 11
1935 .long 5
1936 .long 6
1937 .long 15
1938 .long 0
1939 .long 3
1940 .long 4
1941 .long 7
1942 .long 2
1943 .long 12
1944 .long 1
1945 .long 10
1946 .long 14
1947 .long 9
1948 .long 10
1949 .long 6
1950 .long 9
1951 .long 0
1952 .long 12
1953 .long 11
1954 .long 7
1955 .long 13
1956 .long 15
1957 .long 1
1958 .long 3
1959 .long 14
1960 .long 5
1961 .long 2
1962 .long 8
1963 .long 4
1964 .long 3
1965 .long 15
1966 .long 0
1967 .long 6
1968 .long 10
1969 .long 1
1970 .long 13
1971 .long 8
1972 .long 9
1973 .long 4
1974 .long 5
1975 .long 11
1976 .long 12
1977 .long 7
1978 .long 2
1979 .long 14
1980 .long 2
1981 .long 12
1982 .long 4
1983 .long 1
1984 .long 7
1985 .long 10
1986 .long 11
1987 .long 6
1988 .long 8
1989 .long 5
1990 .long 3
1991 .long 15
1992 .long 13
1993 .long 0
1994 .long 14
1995 .long 9
1996 .long 14
1997 .long 11
1998 .long 2
1999 .long 12
2000 .long 4
2001 .long 7
2002 .long 13
2003 .long 1
2004 .long 5
2005 .long 0
2006 .long 15
2007 .long 10
2008 .long 3
2009 .long 9
2010 .long 8
2011 .long 6
2012 .long 4
2013 .long 2
2014 .long 1
2015 .long 11
2016 .long 10
2017 .long 13
2018 .long 7
2019 .long 8
2020 .long 15
2021 .long 9
2022 .long 12
2023 .long 5
2024 .long 6
2025 .long 3
2026 .long 0
2027 .long 14
2028 .long 11
2029 .long 8
2030 .long 12
2031 .long 7
2032 .long 1
2033 .long 14
2034 .long 2
2035 .long 13
2036 .long 6
2037 .long 15
2038 .long 0
2039 .long 9
2040 .long 10
2041 .long 4
2042 .long 5
2043 .long 3
2044 .long 12
2045 .long 1
2046 .long 10
2047 .long 15
2048 .long 9
2049 .long 2
2050 .long 6
2051 .long 8
2052 .long 0
2053 .long 13
2054 .long 3
2055 .long 4
2056 .long 14
2057 .long 7
2058 .long 5
2059 .long 11
2060 .long 10
2061 .long 15
2062 .long 4
2063 .long 2
2064 .long 7
2065 .long 12
2066 .long 9
2067 .long 5
2068 .long 6
2069 .long 1
2070 .long 13
2071 .long 14
2072 .long 0
2073 .long 11
2074 .long 3
2075 .long 8
2076 .long 9
2077 .long 14
2078 .long 15
2079 .long 5
2080 .long 2
2081 .long 8
2082 .long 12
2083 .long 3
2084 .long 7
2085 .long 0
2086 .long 4
2087 .long 10
2088 .long 1
2089 .long 13
2090 .long 11
2091 .long 6
2092 .long 4
2093 .long 3
2094 .long 2
2095 .long 12
2096 .long 9
2097 .long 5
2098 .long 15
2099 .long 10
2100 .long 11
2101 .long 14
2102 .long 1
2103 .long 7
2104 .long 6
2105 .long 0
2106 .long 8
2107 .long 13
2108 .long 4
2109 .long 11
2110 .long 2
2111 .long 14
2112 .long 15
2113 .long 0
2114 .long 8
2115 .long 13
2116 .long 3
2117 .long 12
2118 .long 9
2119 .long 7
2120 .long 5
2121 .long 10
2122 .long 6
2123 .long 1
2124 .long 13
2125 .long 0
2126 .long 11
2127 .long 7
2128 .long 4
2129 .long 9
2130 .long 1
2131 .long 10
2132 .long 14
2133 .long 3
2134 .long 5
2135 .long 12
2136 .long 2
2137 .long 15
2138 .long 8
2139 .long 6
2140 .long 1
2141 .long 4
2142 .long 11
2143 .long 13
2144 .long 12
2145 .long 3
2146 .long 7
2147 .long 14
2148 .long 10
2149 .long 15
2150 .long 6
2151 .long 8
2152 .long 0
2153 .long 5
2154 .long 9
2155 .long 2
2156 .long 6
2157 .long 11
2158 .long 13
2159 .long 8
2160 .long 1
2161 .long 4
2162 .long 10
2163 .long 7
2164 .long 9
2165 .long 5
2166 .long 0
2167 .long 15
2168 .long 14
2169 .long 2
2170 .long 3
2171 .long 12
2172 .long 13
2173 .long 2
2174 .long 8
2175 .long 4
2176 .long 6
2177 .long 15
2178 .long 11
2179 .long 1
2180 .long 10
2181 .long 9
2182 .long 3
2183 .long 14
2184 .long 5
2185 .long 0
2186 .long 12
2187 .long 7
2188 .long 1
2189 .long 15
2190 .long 13
2191 .long 8
2192 .long 10
2193 .long 3
2194 .long 7
2195 .long 4
2196 .long 12
2197 .long 5
2198 .long 6
2199 .long 11
2200 .long 0
2201 .long 14
2202 .long 9
2203 .long 2
2204 .long 7
2205 .long 11
2206 .long 4
2207 .long 1
2208 .long 9
2209 .long 12
2210 .long 14
2211 .long 2
2212 .long 0
2213 .long 6
2214 .long 10
2215 .long 13
2216 .long 15
2217 .long 3
2218 .long 5
2219 .long 8
2220 .long 2
2221 .long 1
2222 .long 14
2223 .long 7
2224 .long 4
2225 .long 10
2226 .long 8
2227 .long 13
2228 .long 15
2229 .long 12
2230 .long 9
2231 .long 0
2232 .long 3
2233 .long 5
2234 .long 6
2235 .long 11
2236 .align 32
2237 .type perm32, @object
2238 .size perm32, 128
2239 perm32:
2240 .long 16
2241 .long 7
2242 .long 20
2243 .long 21
2244 .long 29
2245 .long 12
2246 .long 28
2247 .long 17
2248 .long 1
2249 .long 15
2250 .long 23
2251 .long 26
2252 .long 5
2253 .long 18
2254 .long 31
2255 .long 10
2256 .long 2
2257 .long 8
2258 .long 24
2259 .long 14
2260 .long 32
2261 .long 27
2262 .long 3
2263 .long 9
2264 .long 19
2265 .long 13
2266 .long 30
2267 .long 6
2268 .long 22
2269 .long 11
2270 .long 4
2271 .long 25
2272 .align 32
2273 .type esel, @object
2274 .size esel, 192
2275 esel:
2276 .long 32
2277 .long 1
2278 .long 2
2279 .long 3
2280 .long 4
2281 .long 5
2282 .long 4
2283 .long 5
2284 .long 6
2285 .long 7
2286 .long 8
2287 .long 9
2288 .long 8
2289 .long 9
2290 .long 10
2291 .long 11
2292 .long 12
2293 .long 13
2294 .long 12
2295 .long 13
2296 .long 14
2297 .long 15
2298 .long 16
2299 .long 17
2300 .long 16
2301 .long 17
2302 .long 18
2303 .long 19
2304 .long 20
2305 .long 21
2306 .long 20
2307 .long 21
2308 .long 22
2309 .long 23
2310 .long 24
2311 .long 25
2312 .long 24
2313 .long 25
2314 .long 26
2315 .long 27
2316 .long 28
2317 .long 29
2318 .long 28
2319 .long 29
2320 .long 30
2321 .long 31
2322 .long 32
2323 .long 1
2324 .align 32
2325 .type pc2, @object
2326 .size pc2, 192
2327 pc2:
2328 .long 14
2329 .long 17
2330 .long 11
2331 .long 24
2332 .long 1
2333 .long 5
2334 .long 3
2335 .long 28
2336 .long 15
2337 .long 6
2338 .long 21
2339 .long 10
2340 .long 23
2341 .long 19
2342 .long 12
2343 .long 4
2344 .long 26
2345 .long 8
2346 .long 16
2347 .long 7
2348 .long 27
2349 .long 20
2350 .long 13
2351 .long 2
2352 .long 41
2353 .long 52
2354 .long 31
2355 .long 37
2356 .long 47
2357 .long 55
2358 .long 30
2359 .long 40
2360 .long 51
2361 .long 45
2362 .long 33
2363 .long 48
2364 .long 44
2365 .long 49
2366 .long 39
2367 .long 56
2368 .long 34
2369 .long 53
2370 .long 46
2371 .long 42
2372 .long 50
2373 .long 36
2374 .long 29
2375 .long 32
2376 .align 32
2377 .type rots, @object
2378 .size rots, 64
2379 rots:
2380 .long 1
2381 .long 1
2382 .long 2
2383 .long 2
2384 .long 2
2385 .long 2
2386 .long 2
2387 .long 2
2388 .long 1
2389 .long 2
2390 .long 2
2391 .long 2
2392 .long 2
2393 .long 2
2394 .long 2
2395 .long 1
2396 .align 32
2397 .type pc1, @object
2398 .size pc1, 224
2399 pc1:
2400 .long 57
2401 .long 49
2402 .long 41
2403 .long 33
2404 .long 25
2405 .long 17
2406 .long 9
2407 .long 1
2408 .long 58
2409 .long 50
2410 .long 42
2411 .long 34
2412 .long 26
2413 .long 18
2414 .long 10
2415 .long 2
2416 .long 59
2417 .long 51
2418 .long 43
2419 .long 35
2420 .long 27
2421 .long 19
2422 .long 11
2423 .long 3
2424 .long 60
2425 .long 52
2426 .long 44
2427 .long 36
2428 .long 63
2429 .long 55
2430 .long 47
2431 .long 39
2432 .long 31
2433 .long 23
2434 .long 15
2435 .long 7
2436 .long 62
2437 .long 54
2438 .long 46
2439 .long 38
2440 .long 30
2441 .long 22
2442 .long 14
2443 .long 6
2444 .long 61
2445 .long 53
2446 .long 45
2447 .long 37
2448 .long 29
2449 .long 21
2450 .long 13
2451 .long 5
2452 .long 28
2453 .long 20
2454 .long 12
2455 .long 4
# Weak references so single-threaded programs need not link libpthread;
# when unresolved, the locking calls become no-ops at runtime.
	.weak	__pthread_mutex_unlock
	.weak	__pthread_mutex_lock
	.ident	"GCC: (GNU) 7.3.0"
	.section	.note.GNU-stack,"",@progbits	# mark stack non-executable