4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 .ident "%Z%%M% %I% %E% SMI"
31 #if defined(__i386) && !defined(__amd64)
34 * Helper routines for 32-bit compilers to perform 64-bit math.
35 * These are used both by the Sun and GCC compilers.
38 #include <sys/asm_linkage.h>
39 #include <sys/asm_misc.h>
/ function __mul64(A,B:Longint):Longint;
/	{Overflow is not checked}
/
/ We essentially do multiply by longhand, using base 2**32 digits.
/ We can ignore ac and top 32 bits of ad+bc: if <> 0, overflow happened.
/ NOTE(review): fragment of the __mul64 body, reassembled from a line-split
/ extraction. The ENTRY label, prologue, and epilogue are not visible in
/ this chunk — confirm against the full file. Per the header comment, this
/ multiplies two 64-bit values by longhand in base-2**32 digits; overflow
/ is not checked.
	mov	12(%ebp), %eax		/ A.hi (a)
	mull	16(%ebp)		/ Multiply A.hi by B.lo (produces ad)
	xchg	%ecx, %eax		/ ecx = bottom half of ad.
	movl	8(%ebp), %eax		/ A.Lo (b)
	movl	%eax, %esi		/ Save A.lo for later
	mull	16(%ebp)		/ Multiply A.Lo by B.LO (dx:ax = bd.)
	addl	%edx, %ecx		/ cx is ad
	xchg	%eax, %esi		/ esi is bd, eax = A.lo (d)
	mull	20(%ebp)		/ Multiply A.lo * B.hi (producing bc)
	addl	%ecx, %eax		/ Produce ad+bc
82 * C support for 64-bit modulo and division.
83 * Hand-customized compiler output - see comments for details.
/ * Unsigned division with remainder.
/ * Divide two uint64_ts, and calculate remainder.
/ UDivRem(uint64_t x, uint64_t y, uint64_t * pmod)
/ /* simple cases: y is a single uint32_t */
/ uint32_t div_hi, div_rem;
/ if (HI(x) < LO(y)) {
/ /* result is a single uint32_t, use one division */
/ /* result is a double uint32_t, use two divisions */
/ A_DIV32(HI(x), 0, LO(y), q1, div_hi);
/ /* calculate q0 and remainder */
/ A_DIV32(LO(x), div_hi, LO(y), q0, div_rem);
/ /* return remainder */
/ /* return result */
/ return (HILO(q1, q0));
/ } else if (HI(x) < HI(y)) {
/ /* HI(x) < HI(y) => x < y => result is 0 */
/ /* return remainder */
/ /* return result */
/ * uint64_t by uint64_t division, resulting in a one-uint32_t
/ uint32_t normshift;
/ /* normalize by shifting x and y so MSB(y) == 1 */
/ HIBIT(HI(y), normshift);	/* index of highest 1 bit */
/ normshift = 31 - normshift;
/ if (normshift == 0) {
/ /* no shifting needed, and x < 2*y so q <= 1 */
/ /* if x >= y then q = 1 (note x1 >= y1) */
/ if (x1 > y1 || x0 >= y0) {
/ /* subtract y from x to get remainder */
/ A_SUB2(y0, y1, x0, x1);
/ /* return remainder */
/ *pmod = HILO(x1, x0);
/ /* return result */
/ * the last case: result is one uint32_t, but we need to
/ uint32_t t0, t1, x2;
/ dt = (y << normshift);
/ /* normalize x (we need 3 uint32_ts!!!) */
/ x2 = (HI(x) >> (32 - normshift));
/ dt = (x << normshift);
/ /* estimate q0, and reduce x to a two uint32_t value */
/ A_DIV32(x1, x2, y1, q0, x1);
/ /* adjust q0 down if too high */
/ * because of the limited range of x2 we can only be
/ A_MUL32(y0, q0, t0, t1);
/ if (t1 > x1 || (t1 == x1 && t0 > x0)) {
/ A_SUB2(y0, y1, t0, t1);
/ /* return remainder */
/ /* subtract product from x to get remainder */
/ A_SUB2(t0, t1, x0, x1);
/ *pmod = (HILO(x1, x0) >> normshift);
/ /* return result */
/ NOTE(review): compiler-generated body of UDivRem reassembled from a
/ line-split extraction. Labels, conditional jumps, and several
/ instructions of the original are NOT visible in this chunk, so the
/ straight-line sequence below is not the complete control flow; the
/ trailing "/ a, b" annotations are the compiler's operand notes,
/ preserved verbatim.
	movl	68(%esp), %edi		/ y,
	testl	%edi, %edi		/ tmp63
	movl	%eax, 40(%esp)		/ x, x
	movl	%edx, 44(%esp)		/ x, x
	movl	%edi, %esi		/, tmp62
	movl	%edi, %ecx		/ tmp62, tmp63
	movl	%edx, %eax		/, tmp68
	cmpl	64(%esp), %eax		/ y, tmp68
	movl	72(%esp), %ebp		/ pmod,
	xorl	%esi, %esi		/ <result>
	movl	40(%esp), %eax		/ x, q0
	movl	%ecx, %edi		/ <result>, <result>
	movl	%edx, (%ebp)		/ div_rem,
	addl	%eax, %esi		/ q0, <result>
	adcl	%edx, %edi		/ q0, <result>
	movl	%esi, %eax		/ <result>, <result>
	movl	%edi, %edx		/ <result>, <result>
	movl	44(%esp), %eax		/ x,
	cmpl	%esi, %eax		/ tmp62, tmp5
	movl	%eax, 32(%esp)		/ tmp5,
	movl	72(%esp), %esi		/ pmod,
	movl	40(%esp), %ebp		/ x,
	movl	44(%esp), %ecx		/ x,
	xorl	%edi, %edi		/ <result>
	xorl	%esi, %esi		/ <result>
	movl	%esi, %eax		/ <result>, <result>
	movl	%edi, %edx		/ <result>, <result>
	movl	%edi, %edx		/ tmp63, div_hi
	movl	%eax, %ecx		/, q1
	movl	$31, %edi		/, tmp87
	bsrl	%esi, %edx		/ tmp62, normshift
	subl	%edx, %edi		/ normshift, tmp87
	movl	%edi, 28(%esp)		/ tmp87,
	movl	32(%esp), %edx		/, x1
	cmpl	%ecx, %edx		/ y1, x1
	movl	64(%esp), %edi		/ y, y0
	movl	40(%esp), %esi		/ x, x0
	cmpl	%edi, %esi		/ y0, x0
	subl	%edi, %esi		/ y0, x0
	sbbl	%ecx, %edx		/ tmp63, x1
	movl	%edx, %ecx		/ x1, x1
	addl	%esi, %edx		/ x0, x1
	adcl	%edi, %ecx		/ x0, x1
	movl	72(%esp), %esi		/ pmod,
	movl	%edx, (%esi)		/ x1,
	movl	%ecx, 4(%esi)		/ x1,
	xorl	%edi, %edi		/ <result>
	movl	%ebp, %esi		/ q0, <result>
	movl	64(%esp), %esi		/ y, dt
	movl	68(%esp), %edi		/ y, dt
	shldl	%esi, %edi		/, dt, dt
	movl	$32, %ecx		/, tmp102
	subl	28(%esp), %ecx		/, tmp102
	movl	%esi, %ebp		/ dt, y0
	shrl	%cl, %esi		/ tmp102,
	movl	%edi, 24(%esp)		/ tmp99,
	movl	%esi, 12(%esp)		/, x2
	movl	44(%esp), %edi		/ x, dt
	movl	40(%esp), %esi		/ x, dt
	shldl	%esi, %edi		/, dt, dt
	movl	%esi, %edi		/ dt, dt
	movl	%edi, %ecx		/ dt,
	movl	%edi, %eax		/ tmp2,
	movl	12(%esp), %edx		/ x2,
	movl	%edx, %ecx		/, x1
	movl	%ebp, %eax		/ y0, t0
	cmpl	%ecx, %edx		/ x1, t1
	movl	%ecx, %edi		/ x1,
	subl	%eax, %esi		/ t0, x0
	movl	%edi, %eax		/, x1
	movl	%eax, %edx		/ x1, x1
	addl	%esi, %eax		/ x0, x1
	adcl	%ebp, %edx		/ x0, x1
	shrdl	%edx, %eax		/, x1, x1
	movl	%edx, %eax		/ x1, x1
	movl	72(%esp), %ecx		/ pmod,
	movl	20(%esp), %esi		/, <result>
	xorl	%edi, %edi		/ <result>
	movl	%eax, (%ecx)		/ x1,
	movl	%edx, 4(%ecx)		/ x1,
	cmpl	%esi, %eax		/ x0, t0
	subl	%ebp, %eax		/ y0, t0
	sbbl	24(%esp), %edx		/, t1
	movl	%esi, %edi		/ dt, dt
 * Unsigned division without remainder.
/ UDiv(uint64_t x, uint64_t y)
/ /* simple cases: y is a single uint32_t */
/ uint32_t div_hi, div_rem;
/ if (HI(x) < LO(y)) {
/ /* result is a single uint32_t, use one division */
/ /* result is a double uint32_t, use two divisions */
/ A_DIV32(HI(x), 0, LO(y), q1, div_hi);
/ /* calculate q0 and remainder */
/ A_DIV32(LO(x), div_hi, LO(y), q0, div_rem);
/ /* return result */
/ return (HILO(q1, q0));
/ } else if (HI(x) < HI(y)) {
/ /* HI(x) < HI(y) => x < y => result is 0 */
/ /* return result */
/ * uint64_t by uint64_t division, resulting in a one-uint32_t
/ unsigned normshift;
/ /* normalize by shifting x and y so MSB(y) == 1 */
/ HIBIT(HI(y), normshift);	/* index of highest 1 bit */
/ normshift = 31 - normshift;
/ if (normshift == 0) {
/ /* no shifting needed, and x < 2*y so q <= 1 */
/ /* if x >= y then q = 1 (note x1 >= y1) */
/ if (x1 > y1 || x0 >= y0) {
/ /* subtract y from x to get remainder */
/ /* A_SUB2(y0, y1, x0, x1); */
/ /* return result */
/ * the last case: result is one uint32_t, but we need to
/ uint32_t t0, t1, x2;
/ dt = (y << normshift);
/ /* normalize x (we need 3 uint32_ts!!!) */
/ x2 = (HI(x) >> (32 - normshift));
/ dt = (x << normshift);
/ /* estimate q0, and reduce x to a two uint32_t value */
/ A_DIV32(x1, x2, y1, q0, x1);
/ /* adjust q0 down if too high */
/ * because of the limited range of x2 we can only be
/ A_MUL32(y0, q0, t0, t1);
/ if (t1 > x1 || (t1 == x1 && t0 > x0)) {
/ /* return result */
/ NOTE(review): compiler-generated body of UDiv reassembled from a
/ line-split extraction. Labels, jumps, and some instructions are not
/ visible in this chunk; the sequence below preserves only the visible
/ instructions and their compiler operand annotations, in order.
	movl	%edx, 36(%esp)		/ x, x
	movl	60(%esp), %edx		/ y,
	testl	%edx, %edx		/ tmp62
	movl	%eax, 32(%esp)		/ x, x
	movl	%edx, %ecx		/ tmp61, tmp62
	movl	%edx, %eax		/, tmp61
	movl	36(%esp), %esi		/ x,
	cmpl	56(%esp), %esi		/ y, tmp67
	movl	%esi, %eax		/, tmp67
	movl	%esi, %edx		/ tmp67, div_hi
	movl	%ecx, %edx		/ tmp62, div_hi
	movl	%eax, %ecx		/, q1
	xorl	%esi, %esi		/ <result>
	movl	%ecx, %edi		/ <result>, <result>
	movl	32(%esp), %eax		/ x, q0
	addl	%eax, %esi		/ q0, <result>
	adcl	%ecx, %edi		/ q0, <result>
	movl	%esi, %eax		/ <result>, <result>
	movl	%edi, %edx		/ <result>, <result>
	movl	36(%esp), %esi		/ x,
	movl	%esi, 24(%esp)		/ tmp1,
	xorl	%esi, %esi		/ <result>
	xorl	%edi, %edi		/ <result>
	cmpl	%eax, 24(%esp)		/ tmp61,
	bsrl	%eax, %ebp		/ tmp61, normshift
	movl	$31, %eax		/, tmp85
	subl	%ebp, %eax		/ normshift, normshift
	movl	24(%esp), %eax		/, x1
	cmpl	%ecx, %eax		/ tmp62, x1
	movl	56(%esp), %esi		/ y, y0
	movl	32(%esp), %edx		/ x, x0
	cmpl	%esi, %edx		/ y0, x0
	movl	%eax, %esi		/ q0, <result>
	xorl	%edi, %edi		/ <result>
	movl	%esi, %eax		/ <result>, <result>
	movl	%edi, %edx		/ <result>, <result>
	movl	56(%esp), %esi		/ y,
	movl	60(%esp), %edi		/ y,
	movl	$32, %ecx		/, tmp96
	subl	%eax, %ecx		/ normshift, tmp96
	movl	%edi, 20(%esp)		/, dt
	movl	24(%esp), %ebp		/, x2
	shrl	%cl, %ebp		/ tmp96, x2
	movl	%esi, 16(%esp)		/, dt
	movl	32(%esp), %esi		/ x, dt
	movl	36(%esp), %edi		/ x, dt
	shldl	%esi, %edi		/, dt, dt
	movl	%esi, %edi		/ dt, dt
	movl	%edi, %eax		/ tmp1,
	movl	%ebp, %edx		/ x2,
	movl	%edx, %ebp		/, x1
	movl	%eax, %ecx		/, q0
	movl	16(%esp), %eax		/ dt,
	cmpl	%ebp, %edx		/ x1, t1
	movl	%esi, %edi		/ dt, x0
	movl	%ecx, %esi		/ q0, <result>
	xorl	%edi, %edi		/ <result>
	cmpl	%edi, %eax		/ x0, t0
	movl	%ecx, %esi		/ q0, <result>
615 * Perform division of two unsigned 64-bit quantities, returning the
616 * quotient in %edx:%eax. __udiv64 pops the arguments on return,
/ NOTE(review): visible fragment of __udiv64 (quotient in %edx:%eax per the
/ header above); loads x into %edx:%eax — surrounding ENTRY/call/ret not
/ visible in this chunk.
	movl	4(%esp), %eax		/ x, x
	movl	8(%esp), %edx		/ x, x
631 * Perform division of two unsigned 64-bit quantities, returning the
632 * remainder in %edx:%eax. __urem64 pops the arguments on return
/ NOTE(review): visible fragment of __urem64 (remainder in %edx:%eax per
/ the header above); stack setup and result loads — the intervening
/ call/adjustment instructions are not visible in this chunk.
	movl	%esp, %ecx		/, tmp65
	movl	16(%esp), %eax		/ x, x
	movl	20(%esp), %edx		/ x, x
	movl	12(%esp), %eax		/ rem, rem
	movl	16(%esp), %edx		/ rem, rem
652 * Perform division of two signed 64-bit quantities, returning the
653 * quotient in %edx:%eax. __div64 pops the arguments on return.
/ __div64(int64_t x, int64_t y)
/ uint64_t xt, yt, r;
/ xt = -(uint64_t) x;
/ yt = -(uint64_t) y;
/ return (negative ? (int64_t) - r : r);
/ NOTE(review): fragment of __div64 reassembled from a line-split
/ extraction. Per the pseudo-code above it negates negative operands,
/ divides unsigned, and fixes up the sign via %ebp ("negative" flag).
/ Branches and the dividing call are not visible in this chunk.
	movl	28(%esp), %edx		/ x, x
	movl	24(%esp), %eax		/ x, x
	movl	32(%esp), %esi		/ y, y
	movl	36(%esp), %edi		/ y, y
	xorl	%ebp, %ebp		/ negative
	movl	%eax, (%esp)		/ x, xt
	movl	%edx, 4(%esp)		/ x, xt
	movl	%esi, %eax		/ y, yt
	movl	%edi, %edx		/ y, yt
	movl	8(%esp), %eax		/ xt, xt
	movl	12(%esp), %edx		/ xt, xt
	testl	%ebp, %ebp		/ negative
	movl	%eax, (%esp)		/ x, xt
	movl	%edx, 4(%esp)		/ x, xt
	movl	$1, %ebp		/, negative
	movl	%esi, %eax		/ y, yt
	movl	%edi, %edx		/ y, yt
	xorl	$1, %ebp		/, negative
738 * Perform division of two signed 64-bit quantities, returning the
739 * remainder in %edx:%eax. __rem64 pops the arguments on return.
/ __rem64(int64_t x, int64_t y)
/ uint64_t xt, yt, rem;
/ xt = -(uint64_t) x;
/ yt = -(uint64_t) y;
/ (void) UDivRem(xt, yt, &rem);
/ return (x < 0 ? (int64_t) - rem : rem);
/ NOTE(review): fragment of __rem64 reassembled from a line-split
/ extraction. Per the pseudo-code above it calls UDivRem on the absolute
/ values and negates the remainder when x < 0. Branches and the UDivRem
/ call are not visible in this chunk.
	movl	36(%esp), %ecx		/ x,
	movl	32(%esp), %esi		/ x,
	movl	36(%esp), %edi		/ x,
	movl	40(%esp), %eax		/ y, y
	movl	44(%esp), %edx		/ y, y
	movl	%esi, (%esp)		/, xt
	movl	%edi, 4(%esp)		/, xt
	movl	%eax, %esi		/ y, yt
	movl	%edx, %edi		/ y, yt
	leal	8(%esp), %eax		/, tmp66
	movl	12(%esp), %eax		/ xt, xt
	movl	16(%esp), %edx		/ xt, xt
	movl	36(%esp), %edi		/ x,
	movl	8(%esp), %eax		/ rem, rem
	movl	12(%esp), %edx		/ rem, rem
	movl	%esi, (%esp)		/, xt
	movl	%edi, 4(%esp)		/, xt
	movl	%eax, %esi		/ y, yt
	movl	%edx, %edi		/ y, yt
823 * C support for 64-bit modulo and division.
824 * GNU routines callable from C (though generated by the compiler).
825 * Hand-customized compiler output - see comments for details.
829 * int32_t/int64_t division/manipulation
831 * Hand-customized compiler output: the non-GCC entry points depart from
832 * the SYS V ABI by requiring their arguments to be popped, and in the
833 * [u]divrem64 cases returning the remainder in %ecx:%esi. Note the
834 * compiler-generated use of %edx:%eax for the first argument of
835 * internal entry points.
838 * - counting the number of leading zeros in a word
839 * - multiplying two 32-bit numbers giving a 64-bit result
840 * - dividing a 64-bit number by a 32-bit number, giving both quotient
842 * - subtracting two 64-bit results
/ #define LO(X) ((uint32_t)(X) & 0xffffffff)
/ #define HI(X) ((uint32_t)((X) >> 32) & 0xffffffff)
/ #define HILO(H, L) (((uint64_t)(H) << 32) + (L))
/
/ /* give index of highest bit */
/ #define HIBIT(a, r) \
/     asm("bsrl %1,%0": "=r"((uint32_t)(r)) : "g" (a))
/
/ /* multiply two uint32_ts resulting in a uint64_t */
/ #define A_MUL32(a, b, lo, hi) \
/     : "=a"((uint32_t)(lo)), "=d"((uint32_t)(hi)) : "g" (b), "0"(a))
/
/ /* divide a uint64_t by a uint32_t */
/ #define A_DIV32(lo, hi, b, q, r) \
/     : "=a"((uint32_t)(q)), "=d"((uint32_t)(r)) \
/     : "g" (b), "0"((uint32_t)(lo)), "1"((uint32_t)hi))
/
/ /* subtract two uint64_ts (with borrow) */
/ #define A_SUB2(bl, bh, al, ah) \
/     asm("subl %4,%0\n\tsbbl %5,%1" \
/     : "=&r"((uint32_t)(al)), "=r"((uint32_t)(ah)) \
/     : "0"((uint32_t)(al)), "1"((uint32_t)(ah)), "g"((uint32_t)(bl)), \
/       "g"((uint32_t)(bh)))
873 * Perform division of two unsigned 64-bit quantities, returning the
874 * quotient in %edx:%eax.
/ NOTE(review): visible fragment of __udivdi3 (quotient in %edx:%eax per
/ the header above); loads x into %edx:%eax — surrounding ENTRY/call/ret
/ not visible in this chunk.
	movl	4(%esp), %eax		/ x, x
	movl	8(%esp), %edx		/ x, x
889 * Perform division of two unsigned 64-bit quantities, returning the
890 * remainder in %edx:%eax.
/ NOTE(review): visible fragment of __umoddi3 (remainder in %edx:%eax per
/ the header above); stack setup and result loads — intervening
/ instructions not visible in this chunk.
	movl	%esp, %ecx		/, tmp65
	movl	16(%esp), %eax		/ x, x
	movl	20(%esp), %edx		/ x, x
	movl	12(%esp), %eax		/ rem, rem
	movl	16(%esp), %edx		/ rem, rem
910 * Perform division of two signed 64-bit quantities, returning the
911 * quotient in %edx:%eax.
/ __divdi3(int64_t x, int64_t y)
/ uint64_t xt, yt, r;
/ xt = -(uint64_t) x;
/ yt = -(uint64_t) y;
/ return (negative ? (int64_t) - r : r);
/ NOTE(review): fragment of __divdi3 (GCC entry point) reassembled from a
/ line-split extraction; parallels the __div64 fragment above. %ebp tracks
/ the "negative" flag per the pseudo-code. Branches and the dividing call
/ are not visible in this chunk.
	movl	28(%esp), %edx		/ x, x
	movl	24(%esp), %eax		/ x, x
	movl	32(%esp), %esi		/ y, y
	movl	36(%esp), %edi		/ y, y
	xorl	%ebp, %ebp		/ negative
	movl	%eax, (%esp)		/ x, xt
	movl	%edx, 4(%esp)		/ x, xt
	movl	%esi, %eax		/ y, yt
	movl	%edi, %edx		/ y, yt
	movl	8(%esp), %eax		/ xt, xt
	movl	12(%esp), %edx		/ xt, xt
	testl	%ebp, %ebp		/ negative
	movl	%eax, (%esp)		/ x, xt
	movl	%edx, 4(%esp)		/ x, xt
	movl	$1, %ebp		/, negative
	movl	%esi, %eax		/ y, yt
	movl	%edi, %edx		/ y, yt
	xorl	$1, %ebp		/, negative
996 * Perform division of two signed 64-bit quantities, returning the
997 * quotient in %edx:%eax.
/ __moddi3(int64_t x, int64_t y)
/ uint64_t xt, yt, rem;
/ xt = -(uint64_t) x;
/ yt = -(uint64_t) y;
/ (void) UDivRem(xt, yt, &rem);
/ return (x < 0 ? (int64_t) - rem : rem);
/ NOTE(review): fragment of __moddi3 (GCC entry point) reassembled from a
/ line-split extraction; parallels the __rem64 fragment above. Branches
/ and the UDivRem call are not visible in this chunk.
	movl	36(%esp), %ecx		/ x,
	movl	32(%esp), %esi		/ x,
	movl	36(%esp), %edi		/ x,
	movl	40(%esp), %eax		/ y, y
	movl	44(%esp), %edx		/ y, y
	movl	%esi, (%esp)		/, xt
	movl	%edi, 4(%esp)		/, xt
	testl	%edx, %edx		/ y
	movl	%eax, %esi		/ y, yt
	movl	%edx, %edi		/ y, yt
	leal	8(%esp), %eax		/, tmp66
	movl	12(%esp), %eax		/ xt, xt
	movl	16(%esp), %edx		/ xt, xt
	movl	36(%esp), %edi		/ x,
	movl	8(%esp), %eax		/ rem, rem
	movl	12(%esp), %edx		/ rem, rem
	testl	%edx, %edx		/ y
	movl	%esi, (%esp)		/, xt
	movl	%edi, 4(%esp)		/, xt
	movl	%eax, %esi		/ y, yt
	movl	%edx, %edi		/ y, yt
	adcl	$0, %edx		/, rem
1083 * Perform division of two unsigned 64-bit quantities, returning the
1084 * quotient in %edx:%eax, and the remainder in %ecx:%esi. __udivrem64
1085 * pops the arguments on return.
/ NOTE(review): visible fragment of __udivrem64 (quotient in %edx:%eax,
/ remainder in %ecx:%esi per the header above); intervening instructions
/ are not visible in this chunk.
	movl	%esp, %ecx		/, tmp64
	movl	16(%esp), %eax		/ x, x
	movl	20(%esp), %edx		/ x, x
	movl	16(%esp), %ecx		/ rem, tmp63
	movl	12(%esp), %esi		/ rem
	SET_SIZE(__udivrem64)
1103 * Signed division with remainder.
/ SDivRem(int64_t x, int64_t y, int64_t * pmod)
/ uint64_t xt, yt, r, rem;
/ xt = -(uint64_t) x;
/ yt = -(uint64_t) y;
/ r = UDivRem(xt, yt, &rem);
/ *pmod = (x < 0 ? (int64_t) - rem : rem);
/ return (negative ? (int64_t) - r : r);
/ NOTE(review): fragment of SDivRem reassembled from a line-split
/ extraction. Per the pseudo-code above it negates negative operands,
/ calls UDivRem, stores the sign-corrected remainder through pmod, and
/ sign-corrects the quotient via %ebp ("negative"). Branches and the
/ UDivRem call are not visible in this chunk.
	testl	%edx, %edx		/ x
	movl	%edx, %edi		/ x, x
	movl	44(%esp), %esi		/ y,
	xorl	%ebp, %ebp		/ negative
	movl	%edx, 12(%esp)		/ x, xt
	movl	%eax, 8(%esp)		/ x, xt
	movl	40(%esp), %edx		/ y, yt
	movl	44(%esp), %ecx		/ y, yt
	leal	16(%esp), %eax		/, tmp70
	movl	20(%esp), %eax		/ xt, xt
	movl	24(%esp), %edx		/ xt, xt
	movl	%edx, 16(%esp)		/, r
	movl	%eax, 12(%esp)		/, r
	testl	%edi, %edi		/ x
	movl	16(%esp), %edx		/ rem, rem
	movl	20(%esp), %ecx		/ rem, rem
	movl	48(%esp), %edi		/ pmod, pmod
	testl	%ebp, %ebp		/ negative
	movl	%edx, (%edi)		/ rem,* pmod
	movl	%ecx, 4(%edi)		/ rem,
	movl	(%esp), %eax		/ r, r
	movl	4(%esp), %edx		/ r, r
	movl	44(%esp), %esi		/ y,
	movl	%edx, 12(%esp)		/, xt
	movl	%eax, 8(%esp)		/, xt
	movl	$1, %ebp		/, negative
	movl	40(%esp), %edx		/ y, yt
	movl	44(%esp), %ecx		/ y, yt
	xorl	$1, %ebp		/, negative
	adcl	$0, %ecx		/, rem
1207 * Perform division of two signed 64-bit quantities, returning the
1208 * quotient in %edx:%eax, and the remainder in %ecx:%esi. __divrem64
1209 * pops the arguments on return.
/ NOTE(review): visible fragment of __divrem64 (quotient in %edx:%eax,
/ remainder in %ecx:%esi per the header above); intervening instructions
/ are not visible in this chunk.
	movl	%esp, %ecx		/, tmp64
	movl	24(%esp), %eax		/ x, x
	movl	28(%esp), %edx		/ x, x
	movl	12(%esp), %esi		/ rem
	SET_SIZE(__divrem64)
1228 #endif /* defined(__i386) && !defined(__amd64) */