# [nyanglibc.git] / sunrpc / xdr_intXX_t.shared.s
	.text
#APP
	memmove = __GI_memmove
	memset = __GI_memset
	memcpy = __GI_memcpy
	.symver __EI_xdr_int64_t, xdr_int64_t@GLIBC_2.2.5
	.symver __EI_xdr_quad_t, xdr_quad_t@GLIBC_2.3.4
	.symver __EI_xdr_uint64_t, xdr_uint64_t@GLIBC_2.2.5
	.symver __EI_xdr_u_quad_t, xdr_u_quad_t@GLIBC_2.3.4
	.symver __EI_xdr_int32_t, xdr_int32_t@GLIBC_2.2.5
	.symver __EI_xdr_uint32_t, xdr_uint32_t@GLIBC_2.2.5
	.symver __EI_xdr_int16_t, xdr_int16_t@GLIBC_2.2.5
	.symver __EI_xdr_uint16_t, xdr_uint16_t@GLIBC_2.2.5
	.symver __EI_xdr_int8_t, xdr_int8_t@GLIBC_2.2.5
	.symver __EI_xdr_uint8_t, xdr_uint8_t@GLIBC_2.2.5
#NO_APP
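# Compiler-generated x86-64 assembly for glibc's sunrpc/xdr_intXX_t.c.
# Each xdr_<type>_t routine dispatches on the stream's current operation
# and works through the stream's vtable.  The struct offsets used below
# are a sketch reconstructed from glibc's <rpc/xdr.h>, assuming the
# x86-64 layout (not copied from the authoritative header):
#
#   struct XDR {
#       enum xdr_op x_op;             /* 0(%rdi): XDR_ENCODE=0, XDR_DECODE=1, XDR_FREE=2 */
#       const struct xdr_ops *x_ops;  /* 8(%rdi) */
#       ...
#   };
#   /* within struct xdr_ops on x86-64: 64 = x_getint32, 72 = x_putint32 */
#
# The __GI_* names are glibc-internal hidden entry points; the __EI_*
# aliases are the exported symbols bound to the symbol versions declared
# by the .symver directives above.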
	.p2align 4,,15
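# xdr_int64_t: transport a 64-bit signed integer as two 32-bit words,
# high word first.  A minimal C sketch of the control flow below, under
# the layout assumptions noted at the top of the file:
#
#   bool_t xdr_int64_t (XDR *xdrs, int64_t *ip)
#   {
#     int32_t t1, t2;
#     switch (xdrs->x_op) {
#       case XDR_ENCODE:                        /* .L4 */
#         t1 = (int32_t) (*ip >> 32);  t2 = (int32_t) *ip;
#         return XDR_PUTINT32 (xdrs, &t1) && XDR_PUTINT32 (xdrs, &t2);
#       case XDR_DECODE:                        /* .L3, success joins at .L16 */
#         if (!XDR_GETINT32 (xdrs, &t1) || !XDR_GETINT32 (xdrs, &t2))
#           return FALSE;                       /* .L10 */
#         *ip = ((int64_t) t1 << 32) | (uint32_t) t2;
#         return TRUE;
#       case XDR_FREE:                          /* nothing to release */
#         return TRUE;
#     }
#     return FALSE;
#   }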
	.globl __GI_xdr_int64_t
	.hidden __GI_xdr_int64_t
	.type __GI_xdr_int64_t, @function
__GI_xdr_int64_t:
	pushq %rbp
	pushq %rbx
	movq %rsi, %rbp
	movq %rdi, %rbx
	subq $24, %rsp
	movl (%rdi), %eax
	cmpl $1, %eax
	je .L3
	jb .L4
	cmpl $2, %eax
	sete %al
	addq $24, %rsp
	movzbl %al, %eax
	popq %rbx
	popq %rbp
	ret
	.p2align 4,,10
	.p2align 3
.L3:
	movq 8(%rdi), %rax
	leaq 8(%rsp), %rsi
	call *64(%rax)
	testl %eax, %eax
	je .L10
	movq 8(%rbx), %rax
	leaq 12(%rsp), %rsi
	movq %rbx, %rdi
	call *64(%rax)
	testl %eax, %eax
	jne .L16
.L10:
	addq $24, %rsp
	xorl %eax, %eax
	popq %rbx
	popq %rbp
	ret
	.p2align 4,,10
	.p2align 3
.L4:
	movq (%rsi), %rax
	leaq 8(%rsp), %rsi
	movq %rax, %rdx
	movl %eax, 12(%rsp)
	movq 8(%rdi), %rax
	sarq $32, %rdx
	movl %edx, 8(%rsp)
	call *72(%rax)
	testl %eax, %eax
	je .L10
	movq 8(%rbx), %rax
	leaq 12(%rsp), %rsi
	movq %rbx, %rdi
	call *72(%rax)
	testl %eax, %eax
	setne %al
	addq $24, %rsp
	movzbl %al, %eax
	popq %rbx
	popq %rbp
	ret
	.p2align 4,,10
	.p2align 3
.L16:
	movslq 8(%rsp), %rax
	movl 12(%rsp), %edx
	salq $32, %rax
	orq %rdx, %rax
	movq %rax, 0(%rbp)
	addq $24, %rsp
	movl $1, %eax
	popq %rbx
	popq %rbp
	ret
	.size __GI_xdr_int64_t, .-__GI_xdr_int64_t
	.globl __EI_xdr_int64_t
	.set __EI_xdr_int64_t,__GI_xdr_int64_t
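# xdr_quad_t: quad_t and int64_t are the same type on this target, so
# the function is a bare tail jump to xdr_int64_t.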
	.p2align 4,,15
	.globl __GI_xdr_quad_t
	.hidden __GI_xdr_quad_t
	.type __GI_xdr_quad_t, @function
__GI_xdr_quad_t:
	jmp __GI_xdr_int64_t
	.size __GI_xdr_quad_t, .-__GI_xdr_quad_t
	.globl __EI_xdr_quad_t
	.set __EI_xdr_quad_t,__GI_xdr_quad_t
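# xdr_uint64_t: same structure as xdr_int64_t, but unsigned.  The encode
# path splits the value with a logical shift (shrq rather than sarq),
# and the decode path at .L32 reassembles without sign extension:
# in C terms, *up = ((uint64_t) t1 << 32) | (uint32_t) t2.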
	.p2align 4,,15
	.globl __GI_xdr_uint64_t
	.hidden __GI_xdr_uint64_t
	.type __GI_xdr_uint64_t, @function
__GI_xdr_uint64_t:
	pushq %rbp
	pushq %rbx
	movq %rsi, %rbp
	movq %rdi, %rbx
	subq $24, %rsp
	movl (%rdi), %eax
	cmpl $1, %eax
	je .L20
	jb .L21
	cmpl $2, %eax
	sete %al
	addq $24, %rsp
	movzbl %al, %eax
	popq %rbx
	popq %rbp
	ret
	.p2align 4,,10
	.p2align 3
.L20:
	movq 8(%rdi), %rax
	leaq 8(%rsp), %rsi
	call *64(%rax)
	testl %eax, %eax
	je .L27
	movq 8(%rbx), %rax
	leaq 12(%rsp), %rsi
	movq %rbx, %rdi
	call *64(%rax)
	testl %eax, %eax
	jne .L32
.L27:
	addq $24, %rsp
	xorl %eax, %eax
	popq %rbx
	popq %rbp
	ret
	.p2align 4,,10
	.p2align 3
.L21:
	movq (%rsi), %rax
	leaq 8(%rsp), %rsi
	movq %rax, %rdx
	movl %eax, 12(%rsp)
	movq 8(%rdi), %rax
	shrq $32, %rdx
	movl %edx, 8(%rsp)
	call *72(%rax)
	testl %eax, %eax
	je .L27
	movq 8(%rbx), %rax
	leaq 12(%rsp), %rsi
	movq %rbx, %rdi
	call *72(%rax)
	testl %eax, %eax
	setne %al
	addq $24, %rsp
	movzbl %al, %eax
	popq %rbx
	popq %rbp
	ret
	.p2align 4,,10
	.p2align 3
.L32:
	movl 8(%rsp), %eax
	movl 12(%rsp), %edx
	salq $32, %rax
	orq %rdx, %rax
	movq %rax, 0(%rbp)
	addq $24, %rsp
	movl $1, %eax
	popq %rbx
	popq %rbp
	ret
	.size __GI_xdr_uint64_t, .-__GI_xdr_uint64_t
	.globl __EI_xdr_uint64_t
	.set __EI_xdr_uint64_t,__GI_xdr_uint64_t
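# xdr_u_quad_t: u_quad_t is uint64_t on this target; alias by tail jump.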
	.p2align 4,,15
	.globl __GI_xdr_u_quad_t
	.hidden __GI_xdr_u_quad_t
	.type __GI_xdr_u_quad_t, @function
__GI_xdr_u_quad_t:
	jmp __GI_xdr_uint64_t
	.size __GI_xdr_u_quad_t, .-__GI_xdr_u_quad_t
	.globl __EI_xdr_u_quad_t
	.set __EI_xdr_u_quad_t,__GI_xdr_u_quad_t
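# xdr_int32_t: a 32-bit value needs no splitting or widening, so after
# the x_op dispatch the work is a single tail jump through the vtable.
# A minimal C sketch, same assumptions as above:
#
#   bool_t xdr_int32_t (XDR *xdrs, int32_t *ip)
#   {
#     switch (xdrs->x_op) {
#       case XDR_DECODE: return XDR_GETINT32 (xdrs, ip);  /* .L36 */
#       case XDR_ENCODE: return XDR_PUTINT32 (xdrs, ip);  /* .L42 */
#       case XDR_FREE:   return TRUE;
#     }
#     return FALSE;
#   }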
	.p2align 4,,15
	.globl __GI_xdr_int32_t
	.hidden __GI_xdr_int32_t
	.type __GI_xdr_int32_t, @function
__GI_xdr_int32_t:
	movl (%rdi), %eax
	cmpl $1, %eax
	je .L36
	jb .L42
	cmpl $2, %eax
	sete %al
	movzbl %al, %eax
	ret
	.p2align 4,,10
	.p2align 3
.L36:
	movq 8(%rdi), %rax
	jmp *64(%rax)
	.p2align 4,,10
	.p2align 3
.L42:
	movq 8(%rdi), %rax
	jmp *72(%rax)
	.size __GI_xdr_int32_t, .-__GI_xdr_int32_t
	.globl __EI_xdr_int32_t
	.set __EI_xdr_int32_t,__GI_xdr_int32_t
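# xdr_uint32_t: byte-for-byte the same code as xdr_int32_t; signedness
# does not affect how 32 bits move through the stream.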
	.p2align 4,,15
	.globl __GI_xdr_uint32_t
	.hidden __GI_xdr_uint32_t
	.type __GI_xdr_uint32_t, @function
__GI_xdr_uint32_t:
	movl (%rdi), %eax
	cmpl $1, %eax
	je .L45
	jb .L51
	cmpl $2, %eax
	sete %al
	movzbl %al, %eax
	ret
	.p2align 4,,10
	.p2align 3
.L45:
	movq 8(%rdi), %rax
	jmp *64(%rax)
	.p2align 4,,10
	.p2align 3
.L51:
	movq 8(%rdi), %rax
	jmp *72(%rax)
	.size __GI_xdr_uint32_t, .-__GI_xdr_uint32_t
	.globl __EI_xdr_uint32_t
	.set __EI_xdr_uint32_t,__GI_xdr_uint32_t
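# xdr_int16_t: XDR has no 16-bit wire format, so the value travels as a
# full 32-bit word: sign-extend into a temporary on encode (movswl),
# store back only the low half on decode (movw).  Sketch, same
# assumptions as above:
#
#   bool_t xdr_int16_t (XDR *xdrs, int16_t *ip)
#   {
#     int32_t t;
#     switch (xdrs->x_op) {
#       case XDR_ENCODE:                        /* .L55 */
#         t = *ip;
#         return XDR_PUTINT32 (xdrs, &t);
#       case XDR_DECODE:                        /* .L54 */
#         if (!XDR_GETINT32 (xdrs, &t))
#           return FALSE;                       /* via .L52 */
#         *ip = (int16_t) t;
#         return TRUE;
#       case XDR_FREE:
#         return TRUE;
#     }
#     return FALSE;
#   }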
	.p2align 4,,15
	.globl __GI_xdr_int16_t
	.hidden __GI_xdr_int16_t
	.type __GI_xdr_int16_t, @function
__GI_xdr_int16_t:
	pushq %rbx
	movq %rsi, %rbx
	subq $16, %rsp
	movl (%rdi), %eax
	cmpl $1, %eax
	je .L54
	jb .L55
	cmpl $2, %eax
	sete %al
	movzbl %al, %eax
.L52:
	addq $16, %rsp
	popq %rbx
	ret
	.p2align 4,,10
	.p2align 3
.L54:
	movq 8(%rdi), %rax
	leaq 12(%rsp), %rsi
	call *64(%rax)
	testl %eax, %eax
	je .L52
	movl 12(%rsp), %eax
	movw %ax, (%rbx)
	addq $16, %rsp
	movl $1, %eax
	popq %rbx
	ret
	.p2align 4,,10
	.p2align 3
.L55:
	movswl (%rsi), %eax
	leaq 12(%rsp), %rsi
	movl %eax, 12(%rsp)
	movq 8(%rdi), %rax
	call *72(%rax)
	addq $16, %rsp
	popq %rbx
	ret
	.size __GI_xdr_int16_t, .-__GI_xdr_int16_t
	.globl __EI_xdr_int16_t
	.set __EI_xdr_int16_t,__GI_xdr_int16_t
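# xdr_uint16_t: same shape as xdr_int16_t, with unsigned widening
# (movzwl rather than movswl) on the encode path.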
	.p2align 4,,15
	.globl __GI_xdr_uint16_t
	.hidden __GI_xdr_uint16_t
	.type __GI_xdr_uint16_t, @function
__GI_xdr_uint16_t:
	pushq %rbx
	movq %rsi, %rbx
	subq $16, %rsp
	movl (%rdi), %eax
	cmpl $1, %eax
	je .L65
	jb .L66
	cmpl $2, %eax
	sete %al
	movzbl %al, %eax
.L63:
	addq $16, %rsp
	popq %rbx
	ret
	.p2align 4,,10
	.p2align 3
.L65:
	movq 8(%rdi), %rax
	leaq 12(%rsp), %rsi
	call *64(%rax)
	testl %eax, %eax
	je .L63
	movl 12(%rsp), %eax
	movw %ax, (%rbx)
	addq $16, %rsp
	movl $1, %eax
	popq %rbx
	ret
	.p2align 4,,10
	.p2align 3
.L66:
	movzwl (%rsi), %eax
	leaq 12(%rsp), %rsi
	movl %eax, 12(%rsp)
	movq 8(%rdi), %rax
	call *72(%rax)
	addq $16, %rsp
	popq %rbx
	ret
	.size __GI_xdr_uint16_t, .-__GI_xdr_uint16_t
	.globl __EI_xdr_uint16_t
	.set __EI_xdr_uint16_t,__GI_xdr_uint16_t
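# xdr_int8_t: as in the 16-bit case, 8-bit values ride in a 32-bit word:
# movsbl sign-extends on encode, movb stores the low byte on decode.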
	.p2align 4,,15
	.globl __GI_xdr_int8_t
	.hidden __GI_xdr_int8_t
	.type __GI_xdr_int8_t, @function
__GI_xdr_int8_t:
	pushq %rbx
	movq %rsi, %rbx
	subq $16, %rsp
	movl (%rdi), %eax
	cmpl $1, %eax
	je .L76
	jb .L77
	cmpl $2, %eax
	sete %al
	movzbl %al, %eax
.L74:
	addq $16, %rsp
	popq %rbx
	ret
	.p2align 4,,10
	.p2align 3
.L76:
	movq 8(%rdi), %rax
	leaq 12(%rsp), %rsi
	call *64(%rax)
	testl %eax, %eax
	je .L74
	movl 12(%rsp), %eax
	movb %al, (%rbx)
	addq $16, %rsp
	movl $1, %eax
	popq %rbx
	ret
	.p2align 4,,10
	.p2align 3
.L77:
	movsbl (%rsi), %eax
	leaq 12(%rsp), %rsi
	movl %eax, 12(%rsp)
	movq 8(%rdi), %rax
	call *72(%rax)
	addq $16, %rsp
	popq %rbx
	ret
	.size __GI_xdr_int8_t, .-__GI_xdr_int8_t
	.globl __EI_xdr_int8_t
	.set __EI_xdr_int8_t,__GI_xdr_int8_t
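# xdr_uint8_t: unsigned 8-bit variant; movzbl zero-extends on encode.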
	.p2align 4,,15
	.globl __GI_xdr_uint8_t
	.hidden __GI_xdr_uint8_t
	.type __GI_xdr_uint8_t, @function
__GI_xdr_uint8_t:
	pushq %rbx
	movq %rsi, %rbx
	subq $16, %rsp
	movl (%rdi), %eax
	cmpl $1, %eax
	je .L87
	jb .L88
	cmpl $2, %eax
	sete %al
	movzbl %al, %eax
.L85:
	addq $16, %rsp
	popq %rbx
	ret
	.p2align 4,,10
	.p2align 3
.L87:
	movq 8(%rdi), %rax
	leaq 12(%rsp), %rsi
	call *64(%rax)
	testl %eax, %eax
	je .L85
	movl 12(%rsp), %eax
	movb %al, (%rbx)
	addq $16, %rsp
	movl $1, %eax
	popq %rbx
	ret
	.p2align 4,,10
	.p2align 3
.L88:
	movzbl (%rsi), %eax
	leaq 12(%rsp), %rsi
	movl %eax, 12(%rsp)
	movq 8(%rdi), %rax
	call *72(%rax)
	addq $16, %rsp
	popq %rbx
	ret
	.size __GI_xdr_uint8_t, .-__GI_xdr_uint8_t
	.globl __EI_xdr_uint8_t
	.set __EI_xdr_uint8_t,__GI_xdr_uint8_t
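# A hedged usage sketch: these primitives are normally driven through a
# concrete stream, e.g. a memory stream from xdrmem_create (part of the
# same sunrpc API; buffer size and error handling are illustrative only):
#
#   char buf[8];
#   XDR x;
#   int64_t v = 42;
#   xdrmem_create (&x, buf, sizeof buf, XDR_ENCODE);
#   if (!xdr_int64_t (&x, &v))   /* resolves to xdr_int64_t@GLIBC_2.2.5 */
#     /* handle encode failure */;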