/* added MouseWheel event support for Silverlight 3.0
 * [moon.git] / cairo / src / cairo-wideint.c
 * blob 45655938679b4499b97c90a2d644f15ddac067e1 */
1 /* cairo - a vector graphics library with display and print output
3 * Copyright © 2004 Keith Packard
5 * This library is free software; you can redistribute it and/or
6 * modify it either under the terms of the GNU Lesser General Public
7 * License version 2.1 as published by the Free Software Foundation
8 * (the "LGPL") or, at your option, under the terms of the Mozilla
9 * Public License Version 1.1 (the "MPL"). If you do not alter this
10 * notice, a recipient may use your version of this file under either
11 * the MPL or the LGPL.
13 * You should have received a copy of the LGPL along with this library
14 * in the file COPYING-LGPL-2.1; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 * You should have received a copy of the MPL along with this library
17 * in the file COPYING-MPL-1.1
19 * The contents of this file are subject to the Mozilla Public License
20 * Version 1.1 (the "License"); you may not use this file except in
21 * compliance with the License. You may obtain a copy of the License at
22 * http://www.mozilla.org/MPL/
24 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
25 * OF ANY KIND, either express or implied. See the LGPL or the MPL for
26 * the specific language governing rights and limitations.
28 * The Original Code is the cairo graphics library.
30 * The Initial Developer of the Original Code is Keith Packard
32 * Contributor(s):
33 * Keith R. Packard <keithp@keithp.com>
36 #include "cairoint.h"
38 #if HAVE_UINT64_T
/* Compose a native uint64_t from a high and a low 32-bit word. */
#define _cairo_uint32s_to_uint64(h,l) ((uint64_t) (h) << 32 | (l))
42 cairo_uquorem64_t
43 _cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
45 cairo_uquorem64_t qr;
47 qr.quo = num / den;
48 qr.rem = num % den;
49 return qr;
52 #else
54 cairo_uint64_t
55 _cairo_uint32_to_uint64 (uint32_t i)
57 cairo_uint64_t q;
59 q.lo = i;
60 q.hi = 0;
61 return q;
64 cairo_int64_t
65 _cairo_int32_to_int64 (int32_t i)
67 cairo_uint64_t q;
69 q.lo = i;
70 q.hi = i < 0 ? -1 : 0;
71 return q;
74 static cairo_uint64_t
75 _cairo_uint32s_to_uint64 (uint32_t h, uint32_t l)
77 cairo_uint64_t q;
79 q.lo = l;
80 q.hi = h;
81 return q;
84 cairo_uint64_t
85 _cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b)
87 cairo_uint64_t s;
89 s.hi = a.hi + b.hi;
90 s.lo = a.lo + b.lo;
91 if (s.lo < a.lo)
92 s.hi++;
93 return s;
96 cairo_uint64_t
97 _cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b)
99 cairo_uint64_t s;
101 s.hi = a.hi - b.hi;
102 s.lo = a.lo - b.lo;
103 if (s.lo > a.lo)
104 s.hi--;
105 return s;
/* Split a 32-bit word into 16-bit halves for the schoolbook
 * multiplication in _cairo_uint32x32_64_mul. */
#define uint32_lo(i) ((i) & 0xffff)
#define uint32_hi(i) ((i) >> 16)
/* carry out of a 16-bit partial-product column */
#define uint32_carry16 ((1) << 16)
112 cairo_uint64_t
113 _cairo_uint32x32_64_mul (uint32_t a, uint32_t b)
115 cairo_uint64_t s;
117 uint16_t ah, al, bh, bl;
118 uint32_t r0, r1, r2, r3;
120 al = uint32_lo (a);
121 ah = uint32_hi (a);
122 bl = uint32_lo (b);
123 bh = uint32_hi (b);
125 r0 = (uint32_t) al * bl;
126 r1 = (uint32_t) al * bh;
127 r2 = (uint32_t) ah * bl;
128 r3 = (uint32_t) ah * bh;
130 r1 += uint32_hi(r0); /* no carry possible */
131 r1 += r2; /* but this can carry */
132 if (r1 < r2) /* check */
133 r3 += uint32_carry16;
135 s.hi = r3 + uint32_hi(r1);
136 s.lo = (uint32_lo (r1) << 16) + uint32_lo (r0);
137 return s;
140 cairo_int64_t
141 _cairo_int32x32_64_mul (int32_t a, int32_t b)
143 cairo_int64_t s;
144 s = _cairo_uint32x32_64_mul ((uint32_t) a, (uint32_t) b);
145 if (a < 0)
146 s.hi -= b;
147 if (b < 0)
148 s.hi -= a;
149 return s;
152 cairo_uint64_t
153 _cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b)
155 cairo_uint64_t s;
157 s = _cairo_uint32x32_64_mul (a.lo, b.lo);
158 s.hi += a.lo * b.hi + a.hi * b.lo;
159 return s;
162 cairo_uint64_t
163 _cairo_uint64_lsl (cairo_uint64_t a, int shift)
165 if (shift >= 32)
167 a.hi = a.lo;
168 a.lo = 0;
169 shift -= 32;
171 if (shift)
173 a.hi = a.hi << shift | a.lo >> (32 - shift);
174 a.lo = a.lo << shift;
176 return a;
179 cairo_uint64_t
180 _cairo_uint64_rsl (cairo_uint64_t a, int shift)
182 if (shift >= 32)
184 a.lo = a.hi;
185 a.hi = 0;
186 shift -= 32;
188 if (shift)
190 a.lo = a.lo >> shift | a.hi << (32 - shift);
191 a.hi = a.hi >> shift;
193 return a;
/* Arithmetic (sign-propagating) right shift of a 32-bit word.
 * NOTE(review): relies on implementation-defined >> of a negative
 * int32_t behaving arithmetically, as it does on all supported
 * compilers. */
#define _cairo_uint32_rsa(a,n) ((uint32_t) (((int32_t) (a)) >> (n)))
198 cairo_int64_t
199 _cairo_uint64_rsa (cairo_int64_t a, int shift)
201 if (shift >= 32)
203 a.lo = a.hi;
204 a.hi = _cairo_uint32_rsa (a.hi, 31);
205 shift -= 32;
207 if (shift)
209 a.lo = a.lo >> shift | a.hi << (32 - shift);
210 a.hi = _cairo_uint32_rsa (a.hi, shift);
212 return a;
216 _cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b)
218 return (a.hi < b.hi ||
219 (a.hi == b.hi && a.lo < b.lo));
223 _cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b)
225 return a.hi == b.hi && a.lo == b.lo;
229 _cairo_int64_lt (cairo_int64_t a, cairo_int64_t b)
231 if (_cairo_int64_negative (a) && !_cairo_int64_negative (b))
232 return 1;
233 if (!_cairo_int64_negative (a) && _cairo_int64_negative (b))
234 return 0;
235 return _cairo_uint64_lt (a, b);
239 _cairo_uint64_cmp (cairo_uint64_t a, cairo_uint64_t b)
241 if (a.hi < b.hi)
242 return -1;
243 else if (a.hi > b.hi)
244 return 1;
245 else if (a.lo < b.lo)
246 return -1;
247 else if (a.lo > b.lo)
248 return 1;
249 else
250 return 0;
254 _cairo_int64_cmp (cairo_int64_t a, cairo_int64_t b)
256 if (_cairo_int64_negative (a) && !_cairo_int64_negative (b))
257 return -1;
258 if (!_cairo_int64_negative (a) && _cairo_int64_negative (b))
259 return 1;
261 return _cairo_uint64_cmp (a, b);
264 cairo_uint64_t
265 _cairo_uint64_not (cairo_uint64_t a)
267 a.lo = ~a.lo;
268 a.hi = ~a.hi;
269 return a;
272 cairo_uint64_t
273 _cairo_uint64_negate (cairo_uint64_t a)
275 a.lo = ~a.lo;
276 a.hi = ~a.hi;
277 if (++a.lo == 0)
278 ++a.hi;
279 return a;
283 * Simple bit-at-a-time divide.
285 cairo_uquorem64_t
286 _cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
288 cairo_uquorem64_t qr;
289 cairo_uint64_t bit;
290 cairo_uint64_t quo;
292 bit = _cairo_uint32_to_uint64 (1);
294 /* normalize to make den >= num, but not overflow */
295 while (_cairo_uint64_lt (den, num) && (den.hi & 0x80000000) == 0)
297 bit = _cairo_uint64_lsl (bit, 1);
298 den = _cairo_uint64_lsl (den, 1);
300 quo = _cairo_uint32_to_uint64 (0);
302 /* generate quotient, one bit at a time */
303 while (bit.hi | bit.lo)
305 if (_cairo_uint64_le (den, num))
307 num = _cairo_uint64_sub (num, den);
308 quo = _cairo_uint64_add (quo, bit);
310 bit = _cairo_uint64_rsl (bit, 1);
311 den = _cairo_uint64_rsl (den, 1);
313 qr.quo = quo;
314 qr.rem = num;
315 return qr;
318 #endif /* !HAVE_UINT64_T */
320 cairo_quorem64_t
321 _cairo_int64_divrem (cairo_int64_t num, cairo_int64_t den)
323 int num_neg = _cairo_int64_negative (num);
324 int den_neg = _cairo_int64_negative (den);
325 cairo_uquorem64_t uqr;
326 cairo_quorem64_t qr;
328 if (num_neg)
329 num = _cairo_int64_negate (num);
330 if (den_neg)
331 den = _cairo_int64_negate (den);
332 uqr = _cairo_uint64_divrem (num, den);
333 if (num_neg)
334 qr.rem = _cairo_int64_negate (uqr.rem);
335 else
336 qr.rem = uqr.rem;
337 if (num_neg != den_neg)
338 qr.quo = (cairo_int64_t) _cairo_int64_negate (uqr.quo);
339 else
340 qr.quo = (cairo_int64_t) uqr.quo;
341 return qr;
344 #if HAVE_UINT128_T
346 cairo_uquorem128_t
347 _cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
349 cairo_uquorem128_t qr;
351 qr.quo = num / den;
352 qr.rem = num % den;
353 return qr;
356 #else
358 cairo_uint128_t
359 _cairo_uint32_to_uint128 (uint32_t i)
361 cairo_uint128_t q;
363 q.lo = _cairo_uint32_to_uint64 (i);
364 q.hi = _cairo_uint32_to_uint64 (0);
365 return q;
368 cairo_int128_t
369 _cairo_int32_to_int128 (int32_t i)
371 cairo_int128_t q;
373 q.lo = _cairo_int32_to_int64 (i);
374 q.hi = _cairo_int32_to_int64 (i < 0 ? -1 : 0);
375 return q;
378 cairo_uint128_t
379 _cairo_uint64_to_uint128 (cairo_uint64_t i)
381 cairo_uint128_t q;
383 q.lo = i;
384 q.hi = _cairo_uint32_to_uint64 (0);
385 return q;
388 cairo_int128_t
389 _cairo_int64_to_int128 (cairo_int64_t i)
391 cairo_int128_t q;
393 q.lo = i;
394 q.hi = _cairo_int32_to_int64 (_cairo_int64_negative(i) ? -1 : 0);
395 return q;
398 cairo_uint128_t
399 _cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b)
401 cairo_uint128_t s;
403 s.hi = _cairo_uint64_add (a.hi, b.hi);
404 s.lo = _cairo_uint64_add (a.lo, b.lo);
405 if (_cairo_uint64_lt (s.lo, a.lo))
406 s.hi = _cairo_uint64_add (s.hi, _cairo_uint32_to_uint64 (1));
407 return s;
410 cairo_uint128_t
411 _cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b)
413 cairo_uint128_t s;
415 s.hi = _cairo_uint64_sub (a.hi, b.hi);
416 s.lo = _cairo_uint64_sub (a.lo, b.lo);
417 if (_cairo_uint64_gt (s.lo, a.lo))
418 s.hi = _cairo_uint64_sub (s.hi, _cairo_uint32_to_uint64(1));
419 return s;
422 #if HAVE_UINT64_T
/* Native uint64_t: extract/compose 32-bit halves with plain shifts
 * and masks.  uint64_lo/hi keep the result as a (64-bit) uint64,
 * matching the struct helpers in the #else branch below. */
#define uint64_lo32(i) ((i) & 0xffffffff)
#define uint64_hi32(i) ((i) >> 32)
#define uint64_lo(i) ((i) & 0xffffffff)
#define uint64_hi(i) ((i) >> 32)
#define uint64_shift32(i) ((i) << 32)
/* carry out of a 32-bit partial-product column */
#define uint64_carry32 (((uint64_t) 1) << 32)
431 #else
/* Two-word uint64: the 32-bit halves are just the struct members. */
#define uint64_lo32(i) ((i).lo)
#define uint64_hi32(i) ((i).hi)
436 static cairo_uint64_t
437 uint64_lo (cairo_uint64_t i)
439 cairo_uint64_t s;
441 s.lo = i.lo;
442 s.hi = 0;
443 return s;
446 static cairo_uint64_t
447 uint64_hi (cairo_uint64_t i)
449 cairo_uint64_t s;
451 s.lo = i.hi;
452 s.hi = 0;
453 return s;
456 static cairo_uint64_t
457 uint64_shift32 (cairo_uint64_t i)
459 cairo_uint64_t s;
461 s.lo = 0;
462 s.hi = i.lo;
463 return s;
/* 2^32 in the two-word representation: { lo = 0, hi = 1 }. */
static const cairo_uint64_t uint64_carry32 = { 0, 1 };
468 #endif
470 cairo_uint128_t
471 _cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b)
473 cairo_uint128_t s;
474 uint32_t ah, al, bh, bl;
475 cairo_uint64_t r0, r1, r2, r3;
477 al = uint64_lo32 (a);
478 ah = uint64_hi32 (a);
479 bl = uint64_lo32 (b);
480 bh = uint64_hi32 (b);
482 r0 = _cairo_uint32x32_64_mul (al, bl);
483 r1 = _cairo_uint32x32_64_mul (al, bh);
484 r2 = _cairo_uint32x32_64_mul (ah, bl);
485 r3 = _cairo_uint32x32_64_mul (ah, bh);
487 r1 = _cairo_uint64_add (r1, uint64_hi (r0)); /* no carry possible */
488 r1 = _cairo_uint64_add (r1, r2); /* but this can carry */
489 if (_cairo_uint64_lt (r1, r2)) /* check */
490 r3 = _cairo_uint64_add (r3, uint64_carry32);
492 s.hi = _cairo_uint64_add (r3, uint64_hi(r1));
493 s.lo = _cairo_uint64_add (uint64_shift32 (r1),
494 uint64_lo (r0));
495 return s;
498 cairo_int128_t
499 _cairo_int64x64_128_mul (cairo_int64_t a, cairo_int64_t b)
501 cairo_int128_t s;
502 s = _cairo_uint64x64_128_mul (_cairo_int64_to_uint64(a),
503 _cairo_int64_to_uint64(b));
504 if (_cairo_int64_negative (a))
505 s.hi = _cairo_uint64_sub (s.hi,
506 _cairo_int64_to_uint64 (b));
507 if (_cairo_int64_negative (b))
508 s.hi = _cairo_uint64_sub (s.hi,
509 _cairo_int64_to_uint64 (a));
510 return s;
513 cairo_uint128_t
514 _cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b)
516 cairo_uint128_t s;
518 s = _cairo_uint64x64_128_mul (a.lo, b.lo);
519 s.hi = _cairo_uint64_add (s.hi,
520 _cairo_uint64_mul (a.lo, b.hi));
521 s.hi = _cairo_uint64_add (s.hi,
522 _cairo_uint64_mul (a.hi, b.lo));
523 return s;
526 cairo_uint128_t
527 _cairo_uint128_lsl (cairo_uint128_t a, int shift)
529 if (shift >= 64)
531 a.hi = a.lo;
532 a.lo = _cairo_uint32_to_uint64 (0);
533 shift -= 64;
535 if (shift)
537 a.hi = _cairo_uint64_add (_cairo_uint64_lsl (a.hi, shift),
538 _cairo_uint64_rsl (a.lo, (64 - shift)));
539 a.lo = _cairo_uint64_lsl (a.lo, shift);
541 return a;
544 cairo_uint128_t
545 _cairo_uint128_rsl (cairo_uint128_t a, int shift)
547 if (shift >= 64)
549 a.lo = a.hi;
550 a.hi = _cairo_uint32_to_uint64 (0);
551 shift -= 64;
553 if (shift)
555 a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
556 _cairo_uint64_lsl (a.hi, (64 - shift)));
557 a.hi = _cairo_uint64_rsl (a.hi, shift);
559 return a;
562 cairo_uint128_t
563 _cairo_uint128_rsa (cairo_int128_t a, int shift)
565 if (shift >= 64)
567 a.lo = a.hi;
568 a.hi = _cairo_uint64_rsa (a.hi, 64-1);
569 shift -= 64;
571 if (shift)
573 a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
574 _cairo_uint64_lsl (a.hi, (64 - shift)));
575 a.hi = _cairo_uint64_rsa (a.hi, shift);
577 return a;
581 _cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b)
583 return (_cairo_uint64_lt (a.hi, b.hi) ||
584 (_cairo_uint64_eq (a.hi, b.hi) &&
585 _cairo_uint64_lt (a.lo, b.lo)));
589 _cairo_int128_lt (cairo_int128_t a, cairo_int128_t b)
591 if (_cairo_int128_negative (a) && !_cairo_int128_negative (b))
592 return 1;
593 if (!_cairo_int128_negative (a) && _cairo_int128_negative (b))
594 return 0;
595 return _cairo_uint128_lt (a, b);
599 _cairo_uint128_cmp (cairo_uint128_t a, cairo_uint128_t b)
601 int cmp;
603 cmp = _cairo_uint64_cmp (a.hi, b.hi);
604 if (cmp)
605 return cmp;
606 return _cairo_uint64_cmp (a.lo, b.lo);
610 _cairo_int128_cmp (cairo_int128_t a, cairo_int128_t b)
612 if (_cairo_int128_negative (a) && !_cairo_int128_negative (b))
613 return -1;
614 if (!_cairo_int128_negative (a) && _cairo_int128_negative (b))
615 return 1;
617 return _cairo_uint128_cmp (a, b);
621 _cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
623 return (_cairo_uint64_eq (a.hi, b.hi) &&
624 _cairo_uint64_eq (a.lo, b.lo));
/* Test the most significant bit of a cairo_uint64_t in either
 * representation (native uint64_t or two-word struct). */
#if HAVE_UINT64_T
#define _cairo_msbset64(q) (q & ((uint64_t) 1 << 63))
#else
#define _cairo_msbset64(q) (q.hi & ((uint32_t) 1 << 31))
#endif
633 cairo_uquorem128_t
634 _cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
636 cairo_uquorem128_t qr;
637 cairo_uint128_t bit;
638 cairo_uint128_t quo;
640 bit = _cairo_uint32_to_uint128 (1);
642 /* normalize to make den >= num, but not overflow */
643 while (_cairo_uint128_lt (den, num) && !_cairo_msbset64(den.hi))
645 bit = _cairo_uint128_lsl (bit, 1);
646 den = _cairo_uint128_lsl (den, 1);
648 quo = _cairo_uint32_to_uint128 (0);
650 /* generate quotient, one bit at a time */
651 while (_cairo_uint128_ne (bit, _cairo_uint32_to_uint128(0)))
653 if (_cairo_uint128_le (den, num))
655 num = _cairo_uint128_sub (num, den);
656 quo = _cairo_uint128_add (quo, bit);
658 bit = _cairo_uint128_rsl (bit, 1);
659 den = _cairo_uint128_rsl (den, 1);
661 qr.quo = quo;
662 qr.rem = num;
663 return qr;
666 cairo_int128_t
667 _cairo_int128_negate (cairo_int128_t a)
669 a.lo = _cairo_uint64_not (a.lo);
670 a.hi = _cairo_uint64_not (a.hi);
671 return _cairo_uint128_add (a, _cairo_uint32_to_uint128 (1));
674 cairo_int128_t
675 _cairo_int128_not (cairo_int128_t a)
677 a.lo = _cairo_uint64_not (a.lo);
678 a.hi = _cairo_uint64_not (a.hi);
679 return a;
682 #endif /* !HAVE_UINT128_T */
684 cairo_quorem128_t
685 _cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den)
687 int num_neg = _cairo_int128_negative (num);
688 int den_neg = _cairo_int128_negative (den);
689 cairo_uquorem128_t uqr;
690 cairo_quorem128_t qr;
692 if (num_neg)
693 num = _cairo_int128_negate (num);
694 if (den_neg)
695 den = _cairo_int128_negate (den);
696 uqr = _cairo_uint128_divrem (num, den);
697 if (num_neg)
698 qr.rem = _cairo_int128_negate (uqr.rem);
699 else
700 qr.rem = uqr.rem;
701 if (num_neg != den_neg)
702 qr.quo = _cairo_int128_negate (uqr.quo);
703 else
704 qr.quo = uqr.quo;
705 return qr;
709 * _cairo_uint_96by64_32x64_divrem:
711 * Compute a 32 bit quotient and 64 bit remainder of a 96 bit unsigned
712 * dividend and 64 bit divisor. If the quotient doesn't fit into 32
713 * bits then the returned remainder is equal to the divisor, and the
714 * quotient is the largest representable 64 bit integer. It is an
715 * error to call this function with the high 32 bits of @num being
716 * non-zero. */
717 cairo_uquorem64_t
718 _cairo_uint_96by64_32x64_divrem (cairo_uint128_t num,
719 cairo_uint64_t den)
721 cairo_uquorem64_t result;
722 cairo_uint64_t B = _cairo_uint32s_to_uint64 (1, 0);
724 /* These are the high 64 bits of the *96* bit numerator. We're
725 * going to represent the numerator as xB + y, where x is a 64,
726 * and y is a 32 bit number. */
727 cairo_uint64_t x = _cairo_uint128_to_uint64 (_cairo_uint128_rsl(num, 32));
729 /* Initialise the result to indicate overflow. */
730 result.quo = _cairo_uint32s_to_uint64 (-1U, -1U);
731 result.rem = den;
733 /* Don't bother if the quotient is going to overflow. */
734 if (_cairo_uint64_ge (x, den)) {
735 return /* overflow */ result;
738 if (_cairo_uint64_lt (x, B)) {
739 /* When the final quotient is known to fit in 32 bits, then
740 * num < 2^64 if and only if den < 2^32. */
741 return _cairo_uint64_divrem (_cairo_uint128_to_uint64 (num), den);
743 else {
744 /* Denominator is >= 2^32. the numerator is >= 2^64, and the
745 * division won't overflow: need two divrems. Write the
746 * numerator and denominator as
748 * num = xB + y x : 64 bits, y : 32 bits
749 * den = uB + v u, v : 32 bits
751 uint32_t y = _cairo_uint128_to_uint32 (num);
752 uint32_t u = uint64_hi32 (den);
753 uint32_t v = _cairo_uint64_to_uint32 (den);
755 /* Compute a lower bound approximate quotient of num/den
756 * from x/(u+1). Then we have
758 * x = q(u+1) + r ; q : 32 bits, r <= u : 32 bits.
760 * xB + y = q(u+1)B + (rB+y)
761 * = q(uB + B + v - v) + (rB+y)
762 * = q(uB + v) + qB - qv + (rB+y)
763 * = q(uB + v) + q(B-v) + (rB+y)
765 * The true quotient of num/den then is q plus the
766 * contribution of q(B-v) + (rB+y). The main contribution
767 * comes from the term q(B-v), with the term (rB+y) only
768 * contributing at most one part.
770 * The term q(B-v) must fit into 64 bits, since q fits into 32
771 * bits on account of being a lower bound to the true
772 * quotient, and as B-v <= 2^32, we may safely use a single
773 * 64/64 bit division to find its contribution. */
775 cairo_uquorem64_t quorem;
776 cairo_uint64_t remainder; /* will contain final remainder */
777 uint32_t quotient; /* will contain final quotient. */
778 uint32_t q;
779 uint32_t r;
781 /* Approximate quotient by dividing the high 64 bits of num by
782 * u+1. Watch out for overflow of u+1. */
783 if (u+1) {
784 quorem = _cairo_uint64_divrem (x, _cairo_uint32_to_uint64 (u+1));
785 q = _cairo_uint64_to_uint32 (quorem.quo);
786 r = _cairo_uint64_to_uint32 (quorem.rem);
788 else {
789 q = uint64_hi32 (x);
790 r = _cairo_uint64_to_uint32 (x);
792 quotient = q;
794 /* Add the main term's contribution to quotient. Note B-v =
795 * -v as an uint32 (unless v = 0) */
796 if (v)
797 quorem = _cairo_uint64_divrem (_cairo_uint32x32_64_mul (q, -v), den);
798 else
799 quorem = _cairo_uint64_divrem (_cairo_uint32s_to_uint64 (q, 0), den);
800 quotient += _cairo_uint64_to_uint32 (quorem.quo);
802 /* Add the contribution of the subterm and start computing the
803 * true remainder. */
804 remainder = _cairo_uint32s_to_uint64 (r, y);
805 if (_cairo_uint64_ge (remainder, den)) {
806 remainder = _cairo_uint64_sub (remainder, den);
807 quotient++;
810 /* Add the contribution of the main term's remainder. The
811 * funky test here checks that remainder + main_rem >= den,
812 * taking into account overflow of the addition. */
813 remainder = _cairo_uint64_add (remainder, quorem.rem);
814 if (_cairo_uint64_ge (remainder, den) ||
815 _cairo_uint64_lt (remainder, quorem.rem))
817 remainder = _cairo_uint64_sub (remainder, den);
818 quotient++;
821 result.quo = _cairo_uint32_to_uint64 (quotient);
822 result.rem = remainder;
824 return result;
827 cairo_quorem64_t
828 _cairo_int_96by64_32x64_divrem (cairo_int128_t num, cairo_int64_t den)
830 int num_neg = _cairo_int128_negative (num);
831 int den_neg = _cairo_int64_negative (den);
832 cairo_uint64_t nonneg_den;
833 cairo_uquorem64_t uqr;
834 cairo_quorem64_t qr;
836 if (num_neg)
837 num = _cairo_int128_negate (num);
838 if (den_neg)
839 nonneg_den = _cairo_int64_negate (den);
840 else
841 nonneg_den = den;
843 uqr = _cairo_uint_96by64_32x64_divrem (num, nonneg_den);
844 if (_cairo_uint64_eq (uqr.rem, nonneg_den)) {
845 /* bail on overflow. */
846 qr.quo = _cairo_uint32s_to_uint64 (0x7FFFFFFF, -1U);;
847 qr.rem = den;
848 return qr;
851 if (num_neg)
852 qr.rem = _cairo_int64_negate (uqr.rem);
853 else
854 qr.rem = uqr.rem;
855 if (num_neg != den_neg)
856 qr.quo = _cairo_int64_negate (uqr.quo);
857 else
858 qr.quo = uqr.quo;
859 return qr;