maint: avoid sc_tight_scope failure in sort.c
[coreutils.git] / src / longlong.h
blob967468cba4ca83f48f92c63252b33b96a90d892b
1 /* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
3 Copyright 1991-2024 Free Software Foundation, Inc.
5 This file is free software; you can redistribute it and/or modify it under the
6 terms of the GNU Lesser General Public License as published by the Free
7 Software Foundation; either version 3 of the License, or (at your option) any
8 later version.
10 This file is distributed in the hope that it will be useful, but WITHOUT ANY
11 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
12 PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
13 details.
15 You should have received a copy of the GNU Lesser General Public License
16 along with this file. If not, see https://www.gnu.org/licenses/. */
18 /* You have to define the following before including this file:
20 UWtype -- An unsigned type, default type for operations (typically a "word")
21 UHWtype -- An unsigned type, at least half the size of UWtype
   UDWtype -- An unsigned type, at least twice as large as UWtype
23 W_TYPE_SIZE -- size in bits of UWtype
25 SItype, USItype -- Signed and unsigned 32 bit types
26 DItype, UDItype -- Signed and unsigned 64 bit types
28 On a 32 bit machine UWtype should typically be USItype;
29 on a 64 bit machine, UWtype should typically be UDItype.
31 Optionally, define:
33 LONGLONG_STANDALONE -- Avoid code that needs machine-dependent support files
34 NO_ASM -- Disable inline asm
37 CAUTION! Using this version of longlong.h outside of GMP is not safe. You
38 need to include gmp.h and gmp-impl.h, or certain things might not work as
39 expected.
/* Half-word helpers: __ll_B is the base of the half-limb representation,
   i.e. 2^(W_TYPE_SIZE/2); __ll_lowpart/__ll_highpart extract the low and
   high half of a UWtype word.  Used by the generic C fallbacks below.  */
#define __BITS4 (W_TYPE_SIZE / 4)
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))

/* This is used to make sure no undesirable sharing between different libraries
   that use this file takes place.  */
#ifndef __MPN
#define __MPN(x) __##x
#endif
53 /* Define auxiliary asm macros.
55 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
56 UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
57 word product in HIGH_PROD and LOW_PROD.
59 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
60 UDWtype product. This is just a variant of umul_ppmm.
62 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
63 denominator) divides a UDWtype, composed by the UWtype integers
64 HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
65 in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less
   than DENOMINATOR for correct operation.  If the macro additionally
   requires the most significant bit of DENOMINATOR to be 1, then the
   pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
70 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
71 denominator). Like udiv_qrnnd but the numbers are signed. The quotient
72 is rounded towards 0.
74 5) count_leading_zeros(count, x) counts the number of zero-bits from the
75 msb to the first non-zero bit in the UWtype X. This is the number of
76 steps X needs to be shifted left to set the msb. Undefined for X == 0,
77 unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
79 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
80 from the least significant end.
82 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
83 high_addend_2, low_addend_2) adds two UWtype integers, composed by
84 HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
85 respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
86 (i.e. carry out) is not stored anywhere, and is lost.
88 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
89 high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
   composed by HIGH_MINUEND and LOW_MINUEND, and HIGH_SUBTRAHEND and
   LOW_SUBTRAHEND respectively.  The result is placed in HIGH_DIFFERENCE
92 and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
93 and is lost.
95 If any of these macros are left undefined for a particular CPU,
96 C macros are used.
99 Notes:
101 For add_ssaaaa the two high and two low addends can both commute, but
102 unfortunately gcc only supports one "%" commutative in each asm block.
103 This has always been so but is only documented in recent versions
104 (eg. pre-release 3.3). Having two or more "%"s can cause an internal
105 compiler error in certain rare circumstances.
107 Apparently it was only the last "%" that was ever actually respected, so
108 the code has been updated to leave just that. Clearly there's a free
109 choice whether high or low should get it, if there's a reason to favour
110 one over the other. Also obviously when the constraints on the two
111 operands are identical there's no benefit to the reloader in any "%" at
112 all.
116 /* The CPUs come in alphabetical order below.
118 Please add support for more CPUs here, or improve the current support
119 for the CPUs below! */
122 /* count_leading_zeros_gcc_clz is count_leading_zeros implemented with gcc
123 3.4 __builtin_clzl or __builtin_clzll, according to our limb size.
124 Similarly count_trailing_zeros_gcc_ctz using __builtin_ctzl or
125 __builtin_ctzll.
127 These builtins are only used when we check what code comes out, on some
128 chips they're merely libgcc calls, where we will instead want an inline
129 in that case (either asm or generic C).
131 These builtins are better than an asm block of the same insn, since an
132 asm block doesn't give gcc any information about scheduling or resource
133 usage. We keep an asm block for use on prior versions of gcc though.
135 For reference, __builtin_ffs existed in gcc prior to __builtin_clz, but
136 it's not used (for count_leading_zeros) because it generally gives extra
137 code to ensure the result is 0 when the input is 0, which we don't need
138 or want. */
140 #ifdef _LONG_LONG_LIMB
141 #define count_leading_zeros_gcc_clz(count,x) \
142 do { \
143 ASSERT ((x) != 0); \
144 (count) = __builtin_clzll (x); \
145 } while (0)
146 #else
147 #define count_leading_zeros_gcc_clz(count,x) \
148 do { \
149 ASSERT ((x) != 0); \
150 (count) = __builtin_clzl (x); \
151 } while (0)
152 #endif
154 #ifdef _LONG_LONG_LIMB
155 #define count_trailing_zeros_gcc_ctz(count,x) \
156 do { \
157 ASSERT ((x) != 0); \
158 (count) = __builtin_ctzll (x); \
159 } while (0)
160 #else
161 #define count_trailing_zeros_gcc_ctz(count,x) \
162 do { \
163 ASSERT ((x) != 0); \
164 (count) = __builtin_ctzl (x); \
165 } while (0)
166 #endif
/* FIXME: The macros using external routines like __MPN(count_leading_zeros)
   don't need to be under !NO_ASM */
#if ! defined (NO_ASM)

/* ---------------- DEC Alpha (64-bit limbs) ---------------- */
#if defined (__alpha) && W_TYPE_SIZE == 64
/* Most alpha-based machines, except Cray systems. */
#if defined (__GNUC__)
#if __GMP_GNUC_PREREQ (3,3)
/* gcc >= 3.3 exposes the high-product instruction as a builtin.  */
#define umul_ppmm(ph, pl, m0, m1) \
  do {									\
    UDItype __m0 = (m0), __m1 = (m1);					\
    (ph) = __builtin_alpha_umulh (__m0, __m1);				\
    (pl) = __m0 * __m1;							\
  } while (0)
#else
#define umul_ppmm(ph, pl, m0, m1) \
  do {									\
    UDItype __m0 = (m0), __m1 = (m1);					\
    __asm__ ("umulh %r1,%2,%0"						\
	     : "=r" (ph)						\
	     : "%rJ" (__m0), "rI" (__m1));				\
    (pl) = __m0 * __m1;							\
  } while (0)
#endif
#else /* ! __GNUC__ */
#include <machine/builtins.h>
#define umul_ppmm(ph, pl, m0, m1) \
  do {									\
    UDItype __m0 = (m0), __m1 = (m1);					\
    (ph) = __UMULH (__m0, __m1);					\
    (pl) = __m0 * __m1;							\
  } while (0)
#endif
#ifndef LONGLONG_STANDALONE
/* Division via a precomputed inverse of the (normalized) divisor.  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  do { UWtype __di;							\
    __di = __MPN(invert_limb) (d);					\
    udiv_qrnnd_preinv (q, r, n1, n0, d, __di);				\
  } while (0)
#define UDIV_PREINV_ALWAYS  1
#define UDIV_NEEDS_NORMALIZATION 1
#endif /* LONGLONG_STANDALONE */

/* clz_tab is required in all configurations, since mpn/alpha/cntlz.asm
   always goes into libgmp.so, even when not actually used.  */
#define COUNT_LEADING_ZEROS_NEED_CLZ_TAB

#if defined (__GNUC__) && HAVE_HOST_CPU_alpha_CIX
/* CIX extension provides direct count-leading/trailing-zero instructions.  */
#define count_leading_zeros(COUNT,X) \
  __asm__("ctlz %1,%0" : "=r"(COUNT) : "r"(X))
#define count_trailing_zeros(COUNT,X) \
  __asm__("cttz %1,%0" : "=r"(COUNT) : "r"(X))
#endif /* clz/ctz using cix */

#if ! defined (count_leading_zeros)				\
  && defined (__GNUC__) && ! defined (LONGLONG_STANDALONE)
/* ALPHA_CMPBGE_0 gives "cmpbge $31,src,dst", ie. test src bytes == 0.
   "$31" is written explicitly in the asm, since an "r" constraint won't
   select reg 31.  There seems no need to worry about "r31" syntax for cray,
   since gcc itself (pre-release 3.4) emits just $31 in various places.  */
#define ALPHA_CMPBGE_0(dst, src)					\
  do { asm ("cmpbge $31, %1, %0" : "=r" (dst) : "r" (src)); } while (0)
/* Zero bytes are turned into bits with cmpbge, a __clz_tab lookup counts
   them, locating the highest non-zero byte.  A second __clz_tab lookup
   counts the leading zero bits in that byte, giving the result.  */
#define count_leading_zeros(count, x)					\
  do {									\
    UWtype  __clz__b, __clz__c, __clz__x = (x);				\
    ALPHA_CMPBGE_0 (__clz__b,  __clz__x);	    /* zero bytes */	\
    __clz__b = __clz_tab [(__clz__b >> 1) ^ 0x7F];  /* 8 to 1 byte */	\
    __clz__b = __clz__b * 8 - 7;		    /* 57 to 1 shift */ \
    __clz__x >>= __clz__b;						\
    __clz__c = __clz_tab [__clz__x];		    /* 8 to 1 bit */	\
    __clz__b = 65 - __clz__b;						\
    (count) = __clz__b - __clz__c;					\
  } while (0)
#define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
#endif /* clz using cmpbge */

#if ! defined (count_leading_zeros) && ! defined (LONGLONG_STANDALONE)
/* Fall back to an external mpn routine.  */
#if HAVE_ATTRIBUTE_CONST
long __MPN(count_leading_zeros) (UDItype) __attribute__ ((const));
#else
long __MPN(count_leading_zeros) (UDItype);
#endif
#define count_leading_zeros(count, x) \
  ((count) = __MPN(count_leading_zeros) (x))
#endif /* clz using mpn */
#endif /* __alpha */
/* ---------------- AVR (8-bit limbs) ---------------- */
#if defined (__AVR) && W_TYPE_SIZE == 8
/* With 8-bit limbs a plain 16-bit multiply yields both product halves.  */
#define umul_ppmm(ph, pl, m0, m1) \
  do {									\
    unsigned short __p = (unsigned short) (m0) * (m1);			\
    (ph) = __p >> 8;							\
    (pl) = __p;								\
  } while (0)
#endif /* AVR */

/* ---------------- Cray (64-bit limbs) ---------------- */
#if defined (_CRAY) && W_TYPE_SIZE == 64
#include <intrinsics.h>
#define UDIV_PREINV_ALWAYS  1
#define UDIV_NEEDS_NORMALIZATION 1
long __MPN(count_leading_zeros) (UDItype);
#define count_leading_zeros(count, x) \
  ((count) = _leadz ((UWtype) (x)))
#if defined (_CRAYIEEE)		/* I.e., Cray T90/ieee, T3D, and T3E */
#define umul_ppmm(ph, pl, m0, m1) \
  do {									\
    UDItype __m0 = (m0), __m1 = (m1);					\
    (ph) = _int_mult_upper (__m0, __m1);				\
    (pl) = __m0 * __m1;							\
  } while (0)
#ifndef LONGLONG_STANDALONE
#define udiv_qrnnd(q, r, n1, n0, d) \
  do { UWtype __di;							\
    __di = __MPN(invert_limb) (d);					\
    udiv_qrnnd_preinv (q, r, n1, n0, d, __di);				\
  } while (0)
#endif /* LONGLONG_STANDALONE */
#endif /* _CRAYIEEE */
#endif /* _CRAY */
/* ---------------- Itanium (64-bit limbs) ---------------- */
#if defined (__ia64) && W_TYPE_SIZE == 64
/* This form encourages gcc (pre-release 3.4 at least) to emit predicated
   "sub r=r,r" and "sub r=r,r,1", giving a 2 cycle latency.  The generic
   code using "al<bl" arithmetically comes out making an actual 0 or 1 in a
   register, which takes an extra cycle.  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {						\
    UWtype __x;					\
    __x = (al) - (bl);				\
    if ((al) < (bl))				\
      (sh) = (ah) - (bh) - 1;			\
    else					\
      (sh) = (ah) - (bh);			\
    (sl) = __x;					\
  } while (0)
#if defined (__GNUC__) && ! defined (__INTEL_COMPILER)
/* Do both product parts in assembly, since that gives better code with
   all gcc versions.  Some callers will just use the upper part, and in
   that situation we waste an instruction, but not any cycles.  */
#define umul_ppmm(ph, pl, m0, m1) \
    __asm__ ("xma.hu %0 = %2, %3, f0\n\txma.l %1 = %2, %3, f0"		\
	     : "=&f" (ph), "=f" (pl)					\
	     : "f" (m0), "f" (m1))
/* Byte-reverse with mux1, locate the first nonzero byte with czx1.l,
   then finish counting within that byte in plain C.  */
#define count_leading_zeros(count, x) \
  do {									\
    UWtype _x = (x), _y, _a, _c;					\
    __asm__ ("mux1 %0 = %1, @rev" : "=r" (_y) : "r" (_x));		\
    __asm__ ("czx1.l %0 = %1" : "=r" (_a) : "r" (-_y | _y));		\
    _c = (_a - 1) << 3;							\
    _x >>= _c;								\
    if (_x >= 1 << 4)							\
      _x >>= 4, _c += 4;						\
    if (_x >= 1 << 2)							\
      _x >>= 2, _c += 2;						\
    _c += _x >> 1;							\
    (count) =  W_TYPE_SIZE - 1 - _c;					\
  } while (0)
/* similar to what gcc does for __builtin_ffs, but 0 based rather than 1
   based, and we don't need a special case for x==0 here */
#define count_trailing_zeros(count, x)					\
  do {									\
    UWtype __ctz_x = (x);						\
    __asm__ ("popcnt %0 = %1"						\
	     : "=r" (count)						\
	     : "r" ((__ctz_x-1) & ~__ctz_x));				\
  } while (0)
#endif
#if defined (__INTEL_COMPILER)
#include <ia64intrin.h>
#define umul_ppmm(ph, pl, m0, m1)					\
  do {									\
    UWtype __m0 = (m0), __m1 = (m1);					\
    ph = _m64_xmahu (__m0, __m1, 0);					\
    pl = __m0 * __m1;							\
  } while (0)
#endif
#ifndef LONGLONG_STANDALONE
#define udiv_qrnnd(q, r, n1, n0, d) \
  do { UWtype __di;							\
    __di = __MPN(invert_limb) (d);					\
    udiv_qrnnd_preinv (q, r, n1, n0, d, __di);				\
  } while (0)
#define UDIV_PREINV_ALWAYS  1
#define UDIV_NEEDS_NORMALIZATION 1
#endif
#endif
#if defined (__GNUC__)

/* We sometimes need to clobber "cc" with gcc2, but that would not be
   understood by gcc1.  Use cpp to avoid major code duplication.  */
#if __GNUC__ < 2
#define __CLOBBER_CC
#define __AND_CLOBBER_CC
#else /* __GNUC__ >= 2 */
#define __CLOBBER_CC : "cc"
#define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */

/* ---------------- AMD 29k ---------------- */
#if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %1,%4,%5\n\taddc %0,%2,%3"				\
	   : "=r" (sh), "=&r" (sl)					\
	   : "r" (ah), "rI" (bh), "%r" (al), "rI" (bl))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %1,%4,%5\n\tsubc %0,%2,%3"				\
	   : "=r" (sh), "=&r" (sl)					\
	   : "r" (ah), "rI" (bh), "r" (al), "rI" (bl))
#define umul_ppmm(xh, xl, m0, m1) \
  do {									\
    USItype __m0 = (m0), __m1 = (m1);					\
    __asm__ ("multiplu %0,%1,%2"					\
	     : "=r" (xl)						\
	     : "r" (__m0), "r" (__m1));					\
    __asm__ ("multmu %0,%1,%2"						\
	     : "=r" (xh)						\
	     : "r" (__m0), "r" (__m1));					\
  } while (0)
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("dividu %0,%3,%4"						\
	   : "=r" (q), "=q" (r)						\
	   : "1" (n1), "r" (n0), "r" (d))
#define count_leading_zeros(count, x) \
    __asm__ ("clz %0,%1"						\
	     : "=r" (count)						\
	     : "r" (x))
#define COUNT_LEADING_ZEROS_0 32
#endif /* __a29k__ */

/* ---------------- ARC ---------------- */
#if defined (__arc__)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add.f\t%1, %4, %5\n\tadc\t%0, %2, %3"			\
	   : "=r" (sh),							\
	     "=&r" (sl)							\
	   : "r"  ((USItype) (ah)),					\
	     "rICal" ((USItype) (bh)),					\
	     "%r" ((USItype) (al)),					\
	     "rICal" ((USItype) (bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub.f\t%1, %4, %5\n\tsbc\t%0, %2, %3"			\
	   : "=r" (sh),							\
	     "=&r" (sl)							\
	   : "r" ((USItype) (ah)),					\
	     "rICal" ((USItype) (bh)),					\
	     "r" ((USItype) (al)),					\
	     "rICal" ((USItype) (bl)))
#endif
/* ---------------- ARM (32-bit limbs) ---------------- */
#if defined (__arm__) && (defined (__thumb2__) || !defined (__thumb__)) \
    && W_TYPE_SIZE == 32
/* When the low addend is a compile-time constant whose negation fits an
   immediate better, use SUBS of the negated value; carry feeds the
   following ADC either way.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do {									\
    if (__builtin_constant_p (bl) && -(USItype)(bl) < (USItype)(bl))	\
      __asm__ ("subs\t%1, %4, %5\n\tadc\t%0, %2, %3"			\
	   : "=r" (sh), "=&r" (sl)					\
	   : "r" (ah), "rI" (bh),					\
	     "%r" (al), "rI" (-(USItype)(bl)) __CLOBBER_CC);		\
    else								\
      __asm__ ("adds\t%1, %4, %5\n\tadc\t%0, %2, %3"			\
	   : "=r" (sh), "=&r" (sl)					\
	   : "r" (ah), "rI" (bh), "%r" (al), "rI" (bl) __CLOBBER_CC);	\
  } while (0)
/* FIXME: Extend the immediate range for the low word by using both ADDS and
   SUBS, since they set carry in the same way.  We need separate definitions
   for thumb and non-thumb since thumb lacks RSC.  */
#if defined (__thumb__)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {									\
    if (__builtin_constant_p (ah) && __builtin_constant_p (bh)		\
	&& (ah) == (bh))						\
      __asm__ ("subs\t%1, %2, %3\n\tsbc\t%0, %0, %0"			\
	   : "=r" (sh), "=r" (sl)					\
	   : "r" (al), "rI" (bl) __CLOBBER_CC);				\
    else if (__builtin_constant_p (al))					\
      __asm__ ("rsbs\t%1, %5, %4\n\tsbc\t%0, %2, %3"			\
	   : "=r" (sh), "=&r" (sl)					\
	   : "r" (ah), "rI" (bh), "rI" (al), "r" (bl) __CLOBBER_CC);	\
    else								\
      __asm__ ("subs\t%1, %4, %5\n\tsbc\t%0, %2, %3"			\
	   : "=r" (sh), "=&r" (sl)					\
	   : "r" (ah), "rI" (bh), "r" (al), "rI" (bl) __CLOBBER_CC);	\
  } while (0)
455 #else
456 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
457 do { \
458 if (__builtin_constant_p (ah) && __builtin_constant_p (bh) \
459 && (ah) == (bh)) \
460 __asm__ ("subs\t%1, %2, %3\n\tsbc\t%0, %0, %0" \
461 : "=r" (sh), "=r" (sl) \
462 : "r" (al), "rI" (bl) __CLOBBER_CC); \
463 else if (__builtin_constant_p (al)) \
465 if (__builtin_constant_p (ah)) \
466 __asm__ ("rsbs\t%1, %5, %4\n\trsc\t%0, %3, %2" \
467 : "=r" (sh), "=&r" (sl) \
468 : "rI" (ah), "r" (bh), "rI" (al), "r" (bl) __CLOBBER_CC); \
469 else \
470 __asm__ ("rsbs\t%1, %5, %4\n\tsbc\t%0, %2, %3" \
471 : "=r" (sh), "=&r" (sl) \
472 : "r" (ah), "rI" (bh), "rI" (al), "r" (bl) __CLOBBER_CC); \
474 else if (__builtin_constant_p (ah)) \
476 if (__builtin_constant_p (bl)) \
477 __asm__ ("subs\t%1, %4, %5\n\trsc\t%0, %3, %2" \
478 : "=r" (sh), "=&r" (sl) \
479 : "rI" (ah), "r" (bh), "r" (al), "rI" (bl) __CLOBBER_CC); \
480 else \
481 __asm__ ("rsbs\t%1, %5, %4\n\trsc\t%0, %3, %2" \
482 : "=r" (sh), "=&r" (sl) \
483 : "rI" (ah), "r" (bh), "rI" (al), "r" (bl) __CLOBBER_CC); \
485 else \
486 __asm__ ("subs\t%1, %4, %5\n\tsbc\t%0, %2, %3" \
487 : "=r" (sh), "=&r" (sl) \
488 : "r" (ah), "rI" (bh), "r" (al), "rI" (bl) __CLOBBER_CC); \
489 } while (0)
490 #endif
#if defined (__ARM_ARCH_2__) || defined (__ARM_ARCH_2A__) \
    || defined (__ARM_ARCH_3__)
/* ARMv2/v3 have no umull; build the 32x32->64 product from four 16x16
   partial products.  */
#define umul_ppmm(xh, xl, a, b) \
  do {									\
    register USItype __t0, __t1, __t2;					\
    __asm__ ("%@ Inlined umul_ppmm\n"					\
	   "	mov	%2, %5, lsr #16\n"				\
	   "	mov	%0, %6, lsr #16\n"				\
	   "	bic	%3, %5, %2, lsl #16\n"				\
	   "	bic	%4, %6, %0, lsl #16\n"				\
	   "	mul	%1, %3, %4\n"					\
	   "	mul	%4, %2, %4\n"					\
	   "	mul	%3, %0, %3\n"					\
	   "	mul	%0, %2, %0\n"					\
	   "	adds	%3, %4, %3\n"					\
	   "	addcs	%0, %0, #65536\n"				\
	   "	adds	%1, %1, %3, lsl #16\n"				\
	   "	adc	%0, %0, %3, lsr #16"				\
	   : "=&r" ((USItype) (xh)), "=r" ((USItype) (xl)),		\
	     "=&r" (__t0), "=&r" (__t1), "=r" (__t2)			\
	   : "r" ((USItype) (a)), "r" ((USItype) (b)) __CLOBBER_CC);	\
  } while (0)
#ifndef LONGLONG_STANDALONE
/* Division falls back to an external mpn routine on these old cores.  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  do { UWtype __r;							\
    (q) = __MPN(udiv_qrnnd) (&__r, (n1), (n0), (d));			\
    (r) = __r;								\
  } while (0)
extern UWtype __MPN(udiv_qrnnd) (UWtype *, UWtype, UWtype, UWtype);
#endif /* LONGLONG_STANDALONE */
#else /* ARMv4 or newer */
#define umul_ppmm(xh, xl, a, b) \
  __asm__ ("umull %0,%1,%2,%3" : "=&r" (xl), "=&r" (xh) : "r" (a), "r" (b))
#define smul_ppmm(xh, xl, a, b) \
  __asm__ ("smull %0,%1,%2,%3" : "=&r" (xl), "=&r" (xh) : "r" (a), "r" (b))
#ifndef LONGLONG_STANDALONE
#define udiv_qrnnd(q, r, n1, n0, d) \
  do { UWtype __di;							\
    __di = __MPN(invert_limb) (d);					\
    udiv_qrnnd_preinv (q, r, n1, n0, d, __di);				\
  } while (0)
#define UDIV_PREINV_ALWAYS  1
#define UDIV_NEEDS_NORMALIZATION 1
#endif /* LONGLONG_STANDALONE */
#endif /* defined(__ARM_ARCH_2__) ... */
#define count_leading_zeros(count, x)  count_leading_zeros_gcc_clz(count, x)
#define count_trailing_zeros(count, x)  count_trailing_zeros_gcc_ctz(count, x)
#endif /* __arm__ */
/* ---------------- AArch64 (64-bit limbs) ---------------- */
#if defined (__aarch64__) && W_TYPE_SIZE == 64
/* As for 32-bit ARM: when the low addend is a constant with the top bit
   set, its negation is the cheaper immediate, so SUBS of the negation
   replaces ADDS.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do {									\
    if (__builtin_constant_p (bl) && ~(UDItype)(bl) <= (UDItype)(bl))	\
      __asm__ ("subs\t%1, %x4, %5\n\tadc\t%0, %x2, %x3"			\
	   : "=r" (sh), "=&r" (sl)					\
	   : "rZ" ((UDItype)(ah)), "rZ" ((UDItype)(bh)),		\
	     "%r" ((UDItype)(al)), "rI" (-(UDItype)(bl)) __CLOBBER_CC);\
    else								\
      __asm__ ("adds\t%1, %x4, %5\n\tadc\t%0, %x2, %x3"			\
	   : "=r" (sh), "=&r" (sl)					\
	   : "rZ" ((UDItype)(ah)), "rZ" ((UDItype)(bh)),		\
	     "%r" ((UDItype)(al)), "rI" ((UDItype)(bl)) __CLOBBER_CC);\
  } while (0)
/* FIX(review): the original ended this macro with "} while(0);" -- the
   stray semicolon breaks uses such as "if (c) sub_ddmmss (...); else ...".
   Removed; everything else is unchanged.  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {									\
    if (__builtin_constant_p (bl) && ~(UDItype)(bl) <= (UDItype)(bl))	\
      __asm__ ("adds\t%1, %x4, %5\n\tsbc\t%0, %x2, %x3"			\
	   : "=r,r" (sh), "=&r,&r" (sl)					\
	   : "rZ,rZ" ((UDItype)(ah)), "rZ,rZ" ((UDItype)(bh)),		\
	     "r,Z"   ((UDItype)(al)), "rI,r" (-(UDItype)(bl)) __CLOBBER_CC);\
    else								\
      __asm__ ("subs\t%1, %x4, %5\n\tsbc\t%0, %x2, %x3"			\
	   : "=r,r" (sh), "=&r,&r" (sl)					\
	   : "rZ,rZ" ((UDItype)(ah)), "rZ,rZ" ((UDItype)(bh)),		\
	     "r,Z"   ((UDItype)(al)), "rI,r"  ((UDItype)(bl)) __CLOBBER_CC);\
  } while (0)
#if __GMP_GNUC_PREREQ (4,9)
/* A 128-bit (TImode) multiply gives both halves at once.  */
#define umul_ppmm(w1, w0, u, v) \
  do {									\
    typedef unsigned int __ll_UTItype __attribute__((mode(TI)));	\
    __ll_UTItype __ll = (__ll_UTItype)(u) * (v);			\
    w1 = __ll >> 64;							\
    w0 = __ll;								\
  } while (0)
#endif
#if !defined (umul_ppmm)
#define umul_ppmm(ph, pl, m0, m1) \
  do {									\
    UDItype __m0 = (m0), __m1 = (m1);					\
    __asm__ ("umulh\t%0, %1, %2" : "=r" (ph) : "r" (__m0), "r" (__m1));	\
    (pl) = __m0 * __m1;							\
  } while (0)
#endif
#define count_leading_zeros(count, x)  count_leading_zeros_gcc_clz(count, x)
#define count_trailing_zeros(count, x)  count_trailing_zeros_gcc_ctz(count, x)
#endif /* __aarch64__ */
/* ---------------- Clipper ---------------- */
#if defined (__clipper__) && W_TYPE_SIZE == 32
#define umul_ppmm(w1, w0, u, v) \
  ({union {UDItype __ll;						\
	   struct {USItype __l, __h;} __i;				\
	  } __x;							\
  __asm__ ("mulwux %2,%0"						\
	   : "=r" (__x.__ll)						\
	   : "%0" ((USItype)(u)), "r" ((USItype)(v)));			\
  (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
#define smul_ppmm(w1, w0, u, v) \
  ({union {DItype __ll;							\
	   struct {SItype __l, __h;} __i;				\
	  } __x;							\
  __asm__ ("mulwx %2,%0"						\
	   : "=r" (__x.__ll)						\
	   : "%0" ((SItype)(u)), "r" ((SItype)(v)));			\
  (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
#define __umulsidi3(u, v) \
  ({UDItype __w;							\
    __asm__ ("mulwux %2,%0"						\
	     : "=r" (__w) : "%0" ((USItype)(u)), "r" ((USItype)(v)));	\
    __w; })
#endif /* __clipper__ */

/* Fujitsu vector computers.  */
#if defined (__uxp__) && W_TYPE_SIZE == 32
#define umul_ppmm(ph, pl, u, v) \
  do {									\
    union {UDItype __ll;						\
	   struct {USItype __h, __l;} __i;				\
	  } __x;							\
    __asm__ ("mult.lu %1,%2,%0" : "=r" (__x.__ll) : "%r" (u), "rK" (v));\
    (ph) = __x.__i.__h;							\
    (pl) = __x.__i.__l;							\
  } while (0)
#define smul_ppmm(ph, pl, u, v) \
  do {									\
    union {UDItype __ll;						\
	   struct {USItype __h, __l;} __i;				\
	  } __x;							\
    __asm__ ("mult.l %1,%2,%0" : "=r" (__x.__ll) : "%r" (u), "rK" (v));	\
    (ph) = __x.__i.__h;							\
    (pl) = __x.__i.__l;							\
  } while (0)
#endif

/* ---------------- Gmicro ---------------- */
#if defined (__gmicro__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add.w %5,%1\n\taddx %3,%0"					\
	   : "=g" (sh), "=&g" (sl)					\
	   : "0" ((USItype)(ah)), "g" ((USItype)(bh)),			\
	     "%1" ((USItype)(al)), "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub.w %5,%1\n\tsubx %3,%0"					\
	   : "=g" (sh), "=&g" (sl)					\
	   : "0" ((USItype)(ah)), "g" ((USItype)(bh)),			\
	     "1" ((USItype)(al)), "g" ((USItype)(bl)))
#define umul_ppmm(ph, pl, m0, m1) \
  __asm__ ("mulx %3,%0,%1"						\
	   : "=g" (ph), "=r" (pl)					\
	   : "%0" ((USItype)(m0)), "g" ((USItype)(m1)))
#define udiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("divx %4,%0,%1"						\
	   : "=g" (q), "=r" (r)						\
	   : "1" ((USItype)(nh)), "0" ((USItype)(nl)), "g" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  __asm__ ("bsch/1 %1,%0"						\
	   : "=g" (count) : "g" ((USItype)(x)), "0" ((USItype)0))
#endif
/* ---------------- HP PA-RISC, 32-bit limbs ---------------- */
#if defined (__hppa) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add%I5 %5,%r4,%1\n\taddc %r2,%r3,%0"			\
	   : "=r" (sh), "=&r" (sl)					\
	   : "rM" (ah), "rM" (bh), "%rM" (al), "rI" (bl))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub%I4 %4,%r5,%1\n\tsubb %r2,%r3,%0"			\
	   : "=r" (sh), "=&r" (sl)					\
	   : "rM" (ah), "rM" (bh), "rI" (al), "rM" (bl))
#if defined (_PA_RISC1_1)
/* xmpyu works on the floating-point register file, hence the "f"
   constraints and the union to split the 64-bit result.  */
#define umul_ppmm(wh, wl, u, v) \
  do {									\
    union {UDItype __ll;						\
	   struct {USItype __h, __l;} __i;				\
	  } __x;							\
    __asm__ ("xmpyu %1,%2,%0" : "=*f" (__x.__ll) : "*f" (u), "*f" (v));	\
    (wh) = __x.__i.__h;							\
    (wl) = __x.__i.__l;							\
  } while (0)
#endif
/* Binary-search style clz using conditional-nullify extru steps.  */
#define count_leading_zeros(count, x) \
  do {									\
    USItype __tmp;							\
    __asm__ (								\
       "ldi		1,%0\n"						\
"	extru,=		%1,15,16,%%r0	; Bits 31..16 zero?\n"		\
"	extru,tr	%1,15,16,%1	; No.  Shift down, skip add.\n"	\
"	ldo		16(%0),%0	; Yes.  Perform add.\n"		\
"	extru,=		%1,23,8,%%r0	; Bits 15..8 zero?\n"		\
"	extru,tr	%1,23,8,%1	; No.  Shift down, skip add.\n"	\
"	ldo		8(%0),%0	; Yes.  Perform add.\n"		\
"	extru,=		%1,27,4,%%r0	; Bits 7..4 zero?\n"		\
"	extru,tr	%1,27,4,%1	; No.  Shift down, skip add.\n"	\
"	ldo		4(%0),%0	; Yes.  Perform add.\n"		\
"	extru,=		%1,29,2,%%r0	; Bits 3..2 zero?\n"		\
"	extru,tr	%1,29,2,%1	; No.  Shift down, skip add.\n"	\
"	ldo		2(%0),%0	; Yes.  Perform add.\n"		\
"	extru		%1,30,1,%1	; Extract bit 1.\n"		\
"	sub		%0,%1,%0	; Subtract it.\n"		\
	: "=r" (count), "=r" (__tmp) : "1" (x));			\
  } while (0)
#endif /* hppa */

/* These macros are for ABI=2.0w.  In ABI=2.0n they can't be used, since GCC
   (3.2) puts longlong into two adjacent 32-bit registers.  Presumably this
   is just a case of no direct support for 2.0n but treating it like 1.0. */
#if defined (__hppa) && W_TYPE_SIZE == 64 && ! defined (_LONG_LONG_LIMB)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add%I5 %5,%r4,%1\n\tadd,dc %r2,%r3,%0"			\
	   : "=r" (sh), "=&r" (sl)					\
	   : "rM" (ah), "rM" (bh), "%rM" (al), "rI" (bl))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub%I4 %4,%r5,%1\n\tsub,db %r2,%r3,%0"			\
	   : "=r" (sh), "=&r" (sl)					\
	   : "rM" (ah), "rM" (bh), "rI" (al), "rM" (bl))
#endif /* hppa */
/* ---------------- IBM 370 / S/390, 32-bit limbs ---------------- */
#if (defined (__i370__) || defined (__s390__) || defined (__mvs__)) && W_TYPE_SIZE == 32
#if defined (__zarch__) || defined (HAVE_HOST_CPU_s390_zarch)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do {									\
/*  if (__builtin_constant_p (bl))					\
      __asm__ ("alfi\t%1,%o5\n\talcr\t%0,%3"				\
	       : "=r" (sh), "=&r" (sl)					\
	       : "0"  (ah), "r" (bh), "%1" (al), "n" (bl) __CLOBBER_CC);\
    else								\
*/  __asm__ ("alr\t%1,%5\n\talcr\t%0,%3"				\
	     : "=r" (sh), "=&r" (sl)					\
	     : "0"  (ah), "r" (bh), "%1" (al), "r" (bl)__CLOBBER_CC);	\
  } while (0)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {									\
/*  if (__builtin_constant_p (bl))					\
      __asm__ ("slfi\t%1,%o5\n\tslbr\t%0,%3"				\
	       : "=r" (sh), "=&r" (sl)					\
	       : "0" (ah), "r" (bh), "1" (al), "n" (bl) __CLOBBER_CC);	\
    else								\
*/  __asm__ ("slr\t%1,%5\n\tslbr\t%0,%3"				\
	     : "=r" (sh), "=&r" (sl)					\
	     : "0" (ah), "r" (bh), "1" (al), "r" (bl) __CLOBBER_CC);	\
  } while (0)
#if __GMP_GNUC_PREREQ (4,5)
/* Let the compiler lower a 64-bit multiply itself.  */
#define umul_ppmm(xh, xl, m0, m1)					\
  do {									\
    union {UDItype __ll;						\
	   struct {USItype __h, __l;} __i;				\
	  } __x;							\
    __x.__ll = (UDItype) (m0) * (UDItype) (m1);				\
    (xh) = __x.__i.__h; (xl) = __x.__i.__l;				\
  } while (0)
#else
#if 0
/* FIXME: this fails if gcc knows about the 64-bit registers.  Use only
   with a new enough processor pretending we have 32-bit registers.  */
#define umul_ppmm(xh, xl, m0, m1)					\
  do {									\
    union {UDItype __ll;						\
	   struct {USItype __h, __l;} __i;				\
	  } __x;							\
    __asm__ ("mlr\t%0,%2"						\
	     : "=r" (__x.__ll)						\
	     : "%0" (m0), "r" (m1));					\
    (xh) = __x.__i.__h; (xl) = __x.__i.__l;				\
  } while (0)
#else
#define umul_ppmm(xh, xl, m0, m1)					\
  do {									\
  /* When we have 64-bit regs and gcc is aware of that, we cannot simply use
     DImode for the product, since that would be allocated to a single 64-bit
     register, whereas mlr uses the low 32-bits of an even-odd register pair.
  */									\
    register USItype __r0 __asm__ ("0");				\
    register USItype __r1 __asm__ ("1") = (m0);				\
    __asm__ ("mlr\t%0,%3"						\
	     : "=r" (__r0), "=r" (__r1)					\
	     : "r" (__r1), "r" (m1));					\
    (xh) = __r0; (xl) = __r1;						\
  } while (0)
#endif /* if 0 */
#endif
#if 0
/* FIXME: this fails if gcc knows about the 64-bit registers.  Use only
   with a new enough processor pretending we have 32-bit registers.  */
#define udiv_qrnnd(q, r, n1, n0, d)					\
  do {									\
    union {UDItype __ll;						\
	   struct {USItype __h, __l;} __i;				\
	  } __x;							\
    __x.__i.__h = n1; __x.__i.__l = n0;					\
    __asm__ ("dlr\t%0,%2"						\
	     : "=r" (__x.__ll)						\
	     : "0" (__x.__ll), "r" (d));				\
    (q) = __x.__i.__l; (r) = __x.__i.__h;				\
  } while (0)
#else
/* dlr likewise needs an even-odd register pair for the dividend.  */
#define udiv_qrnnd(q, r, n1, n0, d)					\
  do {									\
    register USItype __r0 __asm__ ("0") = (n1);				\
    register USItype __r1 __asm__ ("1") = (n0);				\
    __asm__ ("dlr\t%0,%4"						\
	     : "=r" (__r0), "=r" (__r1)					\
	     : "r" (__r0), "r" (__r1), "r" (d));			\
    (q) = __r1; (r) = __r0;						\
  } while (0)
#endif /* if 0 */
#else /* if __zarch__ */
/* FIXME: this fails if gcc knows about the 64-bit registers.  */
#define smul_ppmm(xh, xl, m0, m1)					\
  do {									\
    union {DItype __ll;							\
	   struct {USItype __h, __l;} __i;				\
	  } __x;							\
    __asm__ ("mr\t%0,%2"						\
	     : "=r" (__x.__ll)						\
	     : "%0" (m0), "r" (m1));					\
    (xh) = __x.__i.__h; (xl) = __x.__i.__l;				\
  } while (0)
/* FIXME: this fails if gcc knows about the 64-bit registers.  */
#define sdiv_qrnnd(q, r, n1, n0, d)					\
  do {									\
    union {DItype __ll;							\
	   struct {USItype __h, __l;} __i;				\
	  } __x;							\
    __x.__i.__h = n1; __x.__i.__l = n0;					\
    __asm__ ("dr\t%0,%2"						\
	     : "=r" (__x.__ll)						\
	     : "0" (__x.__ll), "r" (d));				\
    (q) = __x.__i.__l; (r) = __x.__i.__h;				\
  } while (0)
#endif /* if __zarch__ */
#endif
/* ---------------- S/390 64-bit (z/Architecture) ---------------- */
#if defined (__s390x__) && W_TYPE_SIZE == 64
/* We need to cast operands with register constraints, otherwise their types
   will be assumed to be SImode by gcc.  For these machines, such operations
   will insert a value into the low 32 bits, and leave the high 32 bits with
   garbage.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl)				\
  do {									\
    __asm__ ("algr\t%1,%5\n\talcgr\t%0,%3"				\
	     : "=r" (sh), "=&r" (sl)					\
	     : "0"  ((UDItype)(ah)), "r" ((UDItype)(bh)),		\
	       "%1" ((UDItype)(al)), "r" ((UDItype)(bl)) __CLOBBER_CC);	\
  } while (0)
#define sub_ddmmss(sh, sl, ah, al, bh, bl)				\
  do {									\
    __asm__ ("slgr\t%1,%5\n\tslbgr\t%0,%3"				\
	     : "=r" (sh), "=&r" (sl)					\
	     : "0" ((UDItype)(ah)), "r" ((UDItype)(bh)),		\
	       "1" ((UDItype)(al)), "r" ((UDItype)(bl)) __CLOBBER_CC);	\
  } while (0)
#if !defined (__clang__)
/* mlgr/dlgr write a 128-bit value into an even-odd 64-bit register pair,
   hence the TImode union.  */
#define umul_ppmm(xh, xl, m0, m1)					\
  do {									\
    union {unsigned int __attribute__ ((mode(TI))) __ll;		\
	   struct {UDItype __h, __l;} __i;				\
	  } __x;							\
    __asm__ ("mlgr\t%0,%2"						\
	     : "=r" (__x.__ll)						\
	     : "%0" ((UDItype)(m0)), "r" ((UDItype)(m1)));		\
    (xh) = __x.__i.__h; (xl) = __x.__i.__l;				\
  } while (0)
#define udiv_qrnnd(q, r, n1, n0, d)					\
  do {									\
    union {unsigned int __attribute__ ((mode(TI))) __ll;		\
	   struct {UDItype __h, __l;} __i;				\
	  } __x;							\
    __x.__i.__h = n1; __x.__i.__l = n0;					\
    __asm__ ("dlgr\t%0,%2"						\
	     : "=r" (__x.__ll)						\
	     : "0" (__x.__ll), "r" ((UDItype)(d)));			\
    (q) = __x.__i.__l; (r) = __x.__i.__h;				\
  } while (0)
#endif
#if 0 /* FIXME: Enable for z10 (?) */
#define count_leading_zeros(cnt, x)					\
  do {									\
    union {unsigned int __attribute__ ((mode(TI))) __ll;		\
	   struct {UDItype __h, __l;} __i;				\
	  } __clr_cnt;							\
    __asm__ ("flogr\t%0,%1"						\
	     : "=r" (__clr_cnt.__ll)					\
	     : "r" (x) __CLOBBER_CC);					\
    (cnt) = __clr_cnt.__i.__h;						\
  } while (0)
#endif
#endif
886 /* On x86 and x86_64, every asm implicitly clobbers "flags" and "fpsr",
887 so we don't need __CLOBBER_CC. */
888 #if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
/* Two-limb add: addl low halves, adcl high halves with the carry flag. */
889 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
890 __asm__ ("addl %5,%k1\n\tadcl %3,%k0" \
891 : "=r" (sh), "=&r" (sl) \
892 : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
893 "%1" ((USItype)(al)), "g" ((USItype)(bl)))
/* Two-limb subtract with borrow (subl/sbbl). */
894 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
895 __asm__ ("subl %5,%k1\n\tsbbl %3,%k0" \
896 : "=r" (sh), "=&r" (sl) \
897 : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
898 "1" ((USItype)(al)), "g" ((USItype)(bl)))
/* 32x32 -> 64 multiply; one-operand mull leaves the product in EDX:EAX. */
899 #define umul_ppmm(w1, w0, u, v) \
900 __asm__ ("mull %3" \
901 : "=a" (w0), "=d" (w1) \
902 : "%0" ((USItype)(u)), "rm" ((USItype)(v)))
903 #define udiv_qrnnd(q, r, n1, n0, dx) /* d renamed to dx avoiding "=d" */\
904 __asm__ ("divl %4" /* stringification in K&R C */ \
905 : "=a" (q), "=d" (r) \
906 : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "rm" ((USItype)(dx)))
908 #if HAVE_HOST_CPU_i586 || HAVE_HOST_CPU_pentium || HAVE_HOST_CPU_pentiummmx
909 /* Pentium bsrl takes between 10 and 72 cycles depending where the most
910 significant 1 bit is, hence the use of the following alternatives. bsfl
911 is slow too, between 18 and 42 depending where the least significant 1
912 bit is, so let the generic count_trailing_zeros below make use of the
913 count_leading_zeros here too. */
915 #if HAVE_HOST_CPU_pentiummmx && ! defined (LONGLONG_STANDALONE)
916 /* The following should be a fixed 14 or 15 cycles, but possibly plus an L1
917 cache miss reading from __clz_tab. For P55 it's favoured over the float
918 below so as to avoid mixing MMX and x87, since the penalty for switching
919 between the two is about 100 cycles.
921 The asm block sets __shift to -3 if the high 24 bits are clear, -2 for
922 16, -1 for 8, or 0 otherwise. This could be written equivalently as
923 follows, but as of gcc 2.95.2 it results in conditional jumps.
925 __shift = -(__n < 0x1000000);
926 __shift -= (__n < 0x10000);
927 __shift -= (__n < 0x100);
929 The middle two sbbl and cmpl's pair, and with luck something gcc
930 generates might pair with the first cmpl and the last sbbl. The "32+1"
931 constant could be folded into __clz_tab[], but it doesn't seem worth
932 making a different table just for that. */
934 #define count_leading_zeros(c,n) \
935 do { \
936 USItype __n = (n); \
937 USItype __shift; \
938 __asm__ ("cmpl $0x1000000, %1\n" \
939 "sbbl %0, %0\n" \
940 "cmpl $0x10000, %1\n" \
941 "sbbl $0, %0\n" \
942 "cmpl $0x100, %1\n" \
943 "sbbl $0, %0\n" \
944 : "=&r" (__shift) : "r" (__n)); \
945 __shift = __shift*8 + 24 + 1; \
946 (c) = 32 + 1 - __shift - __clz_tab[__n >> __shift]; \
947 } while (0)
948 #define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
949 #define COUNT_LEADING_ZEROS_0 31 /* n==0 indistinguishable from n==1 */
951 #else /* ! pentiummmx || LONGLONG_STANDALONE */
952 /* The following should be a fixed 14 cycles or so. Some scheduling
953 opportunities should be available between the float load/store too. This
954 sort of code is used in gcc 3 for __builtin_ffs (with "n&-n") and is
955 apparently suggested by the Intel optimizing manual (don't know exactly
956 where). gcc 2.95 or up will be best for this, so the "double" is
957 correctly aligned on the stack. */
/* Extracts the biased exponent of (double)n from the high word of the
   IEEE-754 representation; 0x3FF is the double exponent bias. */
958 #define count_leading_zeros(c,n) \
959 do { \
960 union { \
961 double d; \
962 unsigned a[2]; \
963 } __u; \
964 __u.d = (UWtype) (n); \
965 (c) = 0x3FF + 31 - (__u.a[1] >> 20); \
966 } while (0)
967 #define COUNT_LEADING_ZEROS_0 (0x3FF + 31)
968 #endif /* pentiummx */
970 #else /* ! pentium */
972 #if __GMP_GNUC_PREREQ (3,4) /* using bsrl */
973 #define count_leading_zeros(count,x) count_leading_zeros_gcc_clz(count,x)
974 #endif /* gcc clz */
976 /* On P6, gcc prior to 3.0 generates a partial register stall for
977 __cbtmp^31, due to using "xorb $31" instead of "xorl $31", the former
978 being 1 code byte smaller. "31-__cbtmp" is a workaround, probably at the
979 cost of one extra instruction. Do this for "i386" too, since that means
980 generic x86. */
981 #if ! defined (count_leading_zeros) && __GNUC__ < 3 \
982 && (HAVE_HOST_CPU_i386 \
983 || HAVE_HOST_CPU_i686 \
984 || HAVE_HOST_CPU_pentiumpro \
985 || HAVE_HOST_CPU_pentium2 \
986 || HAVE_HOST_CPU_pentium3)
/* bsrl finds the highest set bit; callers must guarantee x != 0. */
987 #define count_leading_zeros(count, x) \
988 do { \
989 USItype __cbtmp; \
990 ASSERT ((x) != 0); \
991 __asm__ ("bsrl %1,%0" : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
992 (count) = 31 - __cbtmp; \
993 } while (0)
994 #endif /* gcc<3 asm bsrl */
996 #ifndef count_leading_zeros
997 #define count_leading_zeros(count, x) \
998 do { \
999 USItype __cbtmp; \
1000 ASSERT ((x) != 0); \
1001 __asm__ ("bsrl %1,%0" : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
1002 (count) = __cbtmp ^ 31; \
1003 } while (0)
1004 #endif /* asm bsrl */
1006 #if __GMP_GNUC_PREREQ (3,4) /* using bsfl */
1007 #define count_trailing_zeros(count,x) count_trailing_zeros_gcc_ctz(count,x)
1008 #endif /* gcc ctz */
1010 #ifndef count_trailing_zeros
/* bsfl finds the lowest set bit; callers must guarantee x != 0. */
1011 #define count_trailing_zeros(count, x) \
1012 do { \
1013 ASSERT ((x) != 0); \
1014 __asm__ ("bsfl %1,%k0" : "=r" (count) : "rm" ((USItype)(x))); \
1015 } while (0)
1016 #endif /* asm bsfl */
1018 #endif /* ! pentium */
1020 #endif /* 80x86 */
1022 #if defined (__amd64__) && W_TYPE_SIZE == 64
/* Two-limb add: addq lows, adcq highs with carry. */
1023 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1024 __asm__ ("addq %5,%q1\n\tadcq %3,%q0" \
1025 : "=r" (sh), "=&r" (sl) \
1026 : "0" ((UDItype)(ah)), "rme" ((UDItype)(bh)), \
1027 "%1" ((UDItype)(al)), "rme" ((UDItype)(bl)))
/* Two-limb subtract with borrow (subq/sbbq). */
1028 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1029 __asm__ ("subq %5,%q1\n\tsbbq %3,%q0" \
1030 : "=r" (sh), "=&r" (sl) \
1031 : "0" ((UDItype)(ah)), "rme" ((UDItype)(bh)), \
1032 "1" ((UDItype)(al)), "rme" ((UDItype)(bl)))
1033 #if X86_ASM_MULX \
1034 && (HAVE_HOST_CPU_haswell || HAVE_HOST_CPU_broadwell \
1035 || HAVE_HOST_CPU_skylake || HAVE_HOST_CPU_bd4 || HAVE_HOST_CPU_zen)
/* mulx: flag-free 64x64 -> 128 multiply, one source implicitly %rdx ("d"). */
1036 #define umul_ppmm(w1, w0, u, v) \
1037 __asm__ ("mulx\t%3, %q0, %q1" \
1038 : "=r" (w0), "=r" (w1) \
1039 : "%d" ((UDItype)(u)), "rm" ((UDItype)(v)))
1040 #else
/* Classic one-operand mulq: product delivered in RDX:RAX. */
1041 #define umul_ppmm(w1, w0, u, v) \
1042 __asm__ ("mulq\t%3" \
1043 : "=a" (w0), "=d" (w1) \
1044 : "%0" ((UDItype)(u)), "rm" ((UDItype)(v)))
1045 #endif
1046 #define udiv_qrnnd(q, r, n1, n0, dx) /* d renamed to dx avoiding "=d" */\
1047 __asm__ ("divq %4" /* stringification in K&R C */ \
1048 : "=a" (q), "=d" (r) \
1049 : "0" ((UDItype)(n0)), "1" ((UDItype)(n1)), "rm" ((UDItype)(dx)))
1051 #if HAVE_HOST_CPU_haswell || HAVE_HOST_CPU_broadwell || HAVE_HOST_CPU_skylake \
1052 || HAVE_HOST_CPU_k10 || HAVE_HOST_CPU_bd1 || HAVE_HOST_CPU_bd2 \
1053 || HAVE_HOST_CPU_bd3 || HAVE_HOST_CPU_bd4 || HAVE_HOST_CPU_zen \
1054 || HAVE_HOST_CPU_bobcat || HAVE_HOST_CPU_jaguar
1055 #define count_leading_zeros(count, x) \
1056 do { \
1057 /* This is lzcnt, spelled for older assemblers. Destination and */ \
1058 /* source must be a 64-bit registers, hence cast and %q. */ \
1059 __asm__ ("rep;bsr\t%1, %q0" : "=r" (count) : "rm" ((UDItype)(x))); \
1060 } while (0)
1061 #define COUNT_LEADING_ZEROS_0 64
1062 #else
/* Plain bsr; undefined for x == 0, hence the ASSERT. */
1063 #define count_leading_zeros(count, x) \
1064 do { \
1065 UDItype __cbtmp; \
1066 ASSERT ((x) != 0); \
1067 __asm__ ("bsr\t%1,%0" : "=r" (__cbtmp) : "rm" ((UDItype)(x))); \
1068 (count) = __cbtmp ^ 63; \
1069 } while (0)
1070 #endif
1072 #if HAVE_HOST_CPU_bd2 || HAVE_HOST_CPU_bd3 || HAVE_HOST_CPU_bd4 \
1073 || HAVE_HOST_CPU_zen || HAVE_HOST_CPU_jaguar
1074 #define count_trailing_zeros(count, x) \
1075 do { \
1076 /* This is tzcnt, spelled for older assemblers. Destination and */ \
1077 /* source must be a 64-bit registers, hence cast and %q. */ \
1078 __asm__ ("rep;bsf\t%1, %q0" : "=r" (count) : "rm" ((UDItype)(x))); \
1079 } while (0)
1080 #define COUNT_TRAILING_ZEROS_0 64
1081 #else
/* Plain bsf; undefined for x == 0, hence the ASSERT. */
1082 #define count_trailing_zeros(count, x) \
1083 do { \
1084 ASSERT ((x) != 0); \
1085 __asm__ ("bsf\t%1, %q0" : "=r" (count) : "rm" ((UDItype)(x))); \
1086 } while (0)
1087 #endif
1088 #endif /* __amd64__ */
1090 #if defined (__i860__) && W_TYPE_SIZE == 32
/* Double-limb right shift: r = low word of (h:l >> c).
   Fix: the output-operand separator ':' was missing, so "=r" was being
   string-concatenated onto the asm template ("..." "=r"), a syntax error. */
#define rshift_rhlc(r,h,l,c) \
  __asm__ ("shr %3,r0,r0\;shrd %1,%2,%0" \
	   : "=r" (r) : "r" (h), "r" (l), "rn" (c))
1094 #endif /* i860 */
1096 #if defined (__i960__) && W_TYPE_SIZE == 32
/* Two-limb add; the leading cmpo clears the carry condition bit. */
1097 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1098 __asm__ ("cmpo 1,0\;addc %5,%4,%1\;addc %3,%2,%0" \
1099 : "=r" (sh), "=&r" (sl) \
1100 : "dI" (ah), "dI" (bh), "%dI" (al), "dI" (bl))
/* Two-limb subtract; cmpo 0,0 presets the borrow condition bit. */
1101 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1102 __asm__ ("cmpo 0,0\;subc %5,%4,%1\;subc %3,%2,%0" \
1103 : "=r" (sh), "=&r" (sl) \
1104 : "dI" (ah), "dI" (bh), "dI" (al), "dI" (bl))
/* emul: 32x32 -> 64; note __l is declared first, i960 being little-endian. */
1105 #define umul_ppmm(w1, w0, u, v) \
1106 ({union {UDItype __ll; \
1107 struct {USItype __l, __h;} __i; \
1108 } __x; \
1109 __asm__ ("emul %2,%1,%0" \
1110 : "=d" (__x.__ll) : "%dI" (u), "dI" (v)); \
1111 (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
/* Same emul, returning the full 64-bit product as a GNU statement expr. */
1112 #define __umulsidi3(u, v) \
1113 ({UDItype __w; \
1114 __asm__ ("emul %2,%1,%0" : "=d" (__w) : "%dI" (u), "dI" (v)); \
1115 __w; })
/* Extended divide: nh:nl / d -> quotient q, remainder r, via i960 "ediv".
   Fixes: the result union __rq was referenced but never declared (only
   __nn was), and the asm template used "%d,%n" which are not valid
   operand references for this 3-operand statement -- rewritten as
   %2,%1,%0 to match the emul template above.
   NOTE(review): i960 operand order for ediv assumed to mirror emul;
   confirm against an i960 assembler before relying on this. */
#define udiv_qrnnd(q, r, nh, nl, d) \
  do { \
    union {UDItype __ll; \
	   struct {USItype __l, __h;} __i; \
	  } __nn, __rq; \
    __nn.__i.__h = (nh); __nn.__i.__l = (nl); \
    __asm__ ("ediv %2,%1,%0" \
	     : "=d" (__rq.__ll) : "dI" (__nn.__ll), "dI" (d)); \
    (r) = __rq.__i.__l; (q) = __rq.__i.__h; \
  } while (0)
/* scanbit returns the index of the highest set bit; xor with 31 converts
   that to a leading-zero count. */
1126 #define count_leading_zeros(count, x) \
1127 do { \
1128 USItype __cbtmp; \
1129 __asm__ ("scanbit %1,%0" : "=r" (__cbtmp) : "r" (x)); \
1130 (count) = __cbtmp ^ 31; \
1131 } while (0)
1132 #define COUNT_LEADING_ZEROS_0 (-32) /* sic */
1133 #if defined (__i960mx) /* what is the proper symbol to test??? */
/* Double-limb right shift via shre on the packed h:l pair.
   Fix: the terminating "} while (0)" line was missing, leaving the
   do-block (and the macro) unterminated. */
#define rshift_rhlc(r,h,l,c) \
  do { \
    union {UDItype __ll; \
	   struct {USItype __l, __h;} __i; \
	  } __nn; \
    __nn.__i.__h = (h); __nn.__i.__l = (l); \
    __asm__ ("shre %2,%1,%0" : "=d" (r) : "dI" (__nn.__ll), "dI" (c)); \
  } while (0)
1142 #endif /* i960mx */
1143 #endif /* i960 */
1146 #if defined (__loongarch64) && W_TYPE_SIZE == 64
/* 64x64 -> 128 multiply in plain C: the compiler emits mul.d/mulh.du.
   Fix: "unsigned __int128__" is not a type GCC accepts; the built-in
   128-bit integer type is spelled "unsigned __int128". */
#define umul_ppmm(w1, w0, u, v) \
  do { \
    UDItype __u = (u), __v = (v); \
    (w0) = __u * __v; \
    (w1) = (unsigned __int128) __u * __v >> 64; \
  } while (0)
1153 #endif
1156 #if (defined (__mc68000__) || defined (__mc68020__) || defined(mc68020) \
1157 || defined (__m68k__) || defined (__mc5200__) || defined (__mc5206e__) \
1158 || defined (__mc5307__)) && W_TYPE_SIZE == 32
/* Two-limb add: add the lows, then addx the highs with the X (extend) bit. */
1159 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1160 __asm__ ("add%.l %5,%1\n\taddx%.l %3,%0" \
1161 : "=d" (sh), "=&d" (sl) \
1162 : "0" ((USItype)(ah)), "d" ((USItype)(bh)), \
1163 "%1" ((USItype)(al)), "g" ((USItype)(bl)))
/* Two-limb subtract with the X bit carrying the borrow. */
1164 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1165 __asm__ ("sub%.l %5,%1\n\tsubx%.l %3,%0" \
1166 : "=d" (sh), "=&d" (sl) \
1167 : "0" ((USItype)(ah)), "d" ((USItype)(bh)), \
1168 "1" ((USItype)(al)), "g" ((USItype)(bl)))
1169 /* The '020, '030, '040 and CPU32 have 32x32->64 and 64/32->32q-32r. */
1170 #if defined (__mc68020__) || defined(mc68020) \
1171 || defined (__mc68030__) || defined (mc68030) \
1172 || defined (__mc68040__) || defined (mc68040) \
1173 || defined (__mcpu32__) || defined (mcpu32) \
1174 || defined (__NeXT__)
/* Native 32x32 -> 64 multiply into the register pair w1:w0. */
1175 #define umul_ppmm(w1, w0, u, v) \
1176 __asm__ ("mulu%.l %3,%1:%0" \
1177 : "=d" (w0), "=d" (w1) \
1178 : "%0" ((USItype)(u)), "dmi" ((USItype)(v)))
/* Native 64/32 unsigned divide producing quotient and remainder. */
1179 #define udiv_qrnnd(q, r, n1, n0, d) \
1180 __asm__ ("divu%.l %4,%1:%0" \
1181 : "=d" (q), "=d" (r) \
1182 : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "dmi" ((USItype)(d)))
/* Signed counterpart of udiv_qrnnd. */
1183 #define sdiv_qrnnd(q, r, n1, n0, d) \
1184 __asm__ ("divs%.l %4,%1:%0" \
1185 : "=d" (q), "=d" (r) \
1186 : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "dmi" ((USItype)(d)))
1187 #else /* for other 68k family members use 16x16->32 multiplication */
/* Schoolbook 32x32 -> 64 built from four 16x16 mulu.w partial products. */
1188 #define umul_ppmm(xh, xl, a, b) \
1189 do { USItype __umul_tmp1, __umul_tmp2; \
1190 __asm__ ("| Inlined umul_ppmm\n" \
1191 " move%.l %5,%3\n" \
1192 " move%.l %2,%0\n" \
1193 " move%.w %3,%1\n" \
1194 " swap %3\n" \
1195 " swap %0\n" \
1196 " mulu%.w %2,%1\n" \
1197 " mulu%.w %3,%0\n" \
1198 " mulu%.w %2,%3\n" \
1199 " swap %2\n" \
1200 " mulu%.w %5,%2\n" \
1201 " add%.l %3,%2\n" \
1202 " jcc 1f\n" \
1203 " add%.l %#0x10000,%0\n" \
1204 "1: move%.l %2,%3\n" \
1205 " clr%.w %2\n" \
1206 " swap %2\n" \
1207 " swap %3\n" \
1208 " clr%.w %3\n" \
1209 " add%.l %3,%1\n" \
1210 " addx%.l %2,%0\n" \
1211 " | End inlined umul_ppmm" \
1212 : "=&d" (xh), "=&d" (xl), \
1213 "=&d" (__umul_tmp1), "=&d" (__umul_tmp2) \
1214 : "%2" ((USItype)(a)), "d" ((USItype)(b))); \
1215 } while (0)
1216 #endif /* not mc68020 */
1217 /* The '020, '030, '040 and '060 have bitfield insns.
1218 GCC 3.4 defines __mc68020__ when in CPU32 mode, check for __mcpu32__ to
1219 exclude bfffo on that chip (bitfield insns not available). */
1220 #if (defined (__mc68020__) || defined (mc68020) \
1221 || defined (__mc68030__) || defined (mc68030) \
1222 || defined (__mc68040__) || defined (mc68040) \
1223 || defined (__mc68060__) || defined (mc68060) \
1224 || defined (__NeXT__)) \
1225 && ! defined (__mcpu32__)
/* bfffo scans for the first one bit, yielding the leading-zero count. */
1226 #define count_leading_zeros(count, x) \
1227 __asm__ ("bfffo %1{%b2:%b2},%0" \
1228 : "=d" (count) \
1229 : "od" ((USItype) (x)), "n" (0))
1230 #define COUNT_LEADING_ZEROS_0 32
1231 #endif
1232 #endif /* mc68000 */
1234 #if defined (__m88000__) && W_TYPE_SIZE == 32
/* Two-limb add: addu.co sets carry-out, addu.ci consumes carry-in. */
1235 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1236 __asm__ ("addu.co %1,%r4,%r5\n\taddu.ci %0,%r2,%r3" \
1237 : "=r" (sh), "=&r" (sl) \
1238 : "rJ" (ah), "rJ" (bh), "%rJ" (al), "rJ" (bl))
/* Two-limb subtract with borrow chaining (subu.co / subu.ci). */
1239 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1240 __asm__ ("subu.co %1,%r4,%r5\n\tsubu.ci %0,%r2,%r3" \
1241 : "=r" (sh), "=&r" (sl) \
1242 : "rJ" (ah), "rJ" (bh), "rJ" (al), "rJ" (bl))
/* ff1 finds the highest set bit; xor 31 turns it into a zero count. */
1243 #define count_leading_zeros(count, x) \
1244 do { \
1245 USItype __cbtmp; \
1246 __asm__ ("ff1 %0,%1" : "=r" (__cbtmp) : "r" (x)); \
1247 (count) = __cbtmp ^ 31; \
1248 } while (0)
1249 #define COUNT_LEADING_ZEROS_0 63 /* sic */
1250 #if defined (__m88110__)
/* mulu.d: 32x32 -> 64 into a register pair, split via the union. */
1251 #define umul_ppmm(wh, wl, u, v) \
1252 do { \
1253 union {UDItype __ll; \
1254 struct {USItype __h, __l;} __i; \
1255 } __x; \
1256 __asm__ ("mulu.d %0,%1,%2" : "=r" (__x.__ll) : "r" (u), "r" (v)); \
1257 (wh) = __x.__i.__h; \
1258 (wl) = __x.__i.__l; \
1259 } while (0)
/* 64/32 divide via divu.d; remainder reconstructed as n0 - q*d.
   Fix: the union has no direct member __l -- the low word lives in the
   nested struct, so the accesses must be __q.__i.__l, not __q.__l. */
#define udiv_qrnnd(q, r, n1, n0, d) \
  ({union {UDItype __ll; \
	   struct {USItype __h, __l;} __i; \
	  } __x, __q; \
  __x.__i.__h = (n1); __x.__i.__l = (n0); \
  __asm__ ("divu.d %0,%1,%2" \
	   : "=r" (__q.__ll) : "r" (__x.__ll), "r" (d)); \
  (r) = (n0) - __q.__i.__l * (d); (q) = __q.__i.__l; })
1268 #endif /* __m88110__ */
1269 #endif /* __m88000__ */
1271 #if defined (__mips) && W_TYPE_SIZE == 32
1272 #if __GMP_GNUC_PREREQ (4,4)
/* Preferred form: let gcc >= 4.4 expand the 64-bit product itself. */
1273 #define umul_ppmm(w1, w0, u, v) \
1274 do { \
1275 UDItype __ll = (UDItype)(u) * (v); \
1276 w1 = __ll >> 32; \
1277 w0 = __ll; \
1278 } while (0)
1279 #endif
1280 #if !defined (umul_ppmm) && __GMP_GNUC_PREREQ (2,7) && !defined (__clang__)
/* Older gcc: read HI/LO directly via the "h"/"l" register constraints. */
1281 #define umul_ppmm(w1, w0, u, v) \
1282 __asm__ ("multu %2,%3" : "=l" (w0), "=h" (w1) : "d" (u), "d" (v))
1283 #endif
1284 #if !defined (umul_ppmm)
/* Fallback: explicit mflo/mfhi moves out of the HI/LO pair. */
1285 #define umul_ppmm(w1, w0, u, v) \
1286 __asm__ ("multu %2,%3\n\tmflo %0\n\tmfhi %1" \
1287 : "=d" (w0), "=d" (w1) : "d" (u), "d" (v))
1288 #endif
1289 #endif /* __mips */
1291 #if (defined (__mips) && __mips >= 3) && W_TYPE_SIZE == 64
1292 #if defined (_MIPS_ARCH_MIPS64R6)
/* R6 removed HI/LO: low half by plain multiply, high half via dmuhu. */
1293 #define umul_ppmm(w1, w0, u, v) \
1294 do { \
1295 UDItype __m0 = (u), __m1 = (v); \
1296 (w0) = __m0 * __m1; \
1297 __asm__ ("dmuhu\t%0, %1, %2" : "=d" (w1) : "d" (__m0), "d" (__m1)); \
1298 } while (0)
1299 #endif
1300 #if !defined (umul_ppmm) && __GMP_GNUC_PREREQ (4,4)
/* gcc >= 4.4: use a TImode (128-bit) product and split it. */
1301 #define umul_ppmm(w1, w0, u, v) \
1302 do { \
1303 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
1304 __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \
1305 w1 = __ll >> 64; \
1306 w0 = __ll; \
1307 } while (0)
1308 #endif
1309 #if !defined (umul_ppmm) && __GMP_GNUC_PREREQ (2,7) && !defined (__clang__)
/* Older gcc: dmultu with direct HI/LO register constraints. */
1310 #define umul_ppmm(w1, w0, u, v) \
1311 __asm__ ("dmultu %2,%3" \
1312 : "=l" (w0), "=h" (w1) \
1313 : "d" ((UDItype)(u)), "d" ((UDItype)(v)))
1314 #endif
1315 #if !defined (umul_ppmm)
/* Fallback: dmultu followed by explicit mflo/mfhi. */
1316 #define umul_ppmm(w1, w0, u, v) \
1317 __asm__ ("dmultu %2,%3\n\tmflo %0\n\tmfhi %1" \
1318 : "=d" (w0), "=d" (w1) \
1319 : "d" ((UDItype)(u)), "d" ((UDItype)(v)))
1320 #endif
1321 #endif /* __mips */
1323 #if defined (__mmix__) && W_TYPE_SIZE == 64
/* MULU leaves the high half of the product in rH, matched by the "z"
   constraint. */
1324 #define umul_ppmm(w1, w0, u, v) \
1325 __asm__ ("MULU %0,%2,%3" : "=r" (w0), "=z" (w1) : "r" (u), "r" (v))
1326 #endif
1328 #if defined (__ns32000__) && W_TYPE_SIZE == 32
/* meid: 32x32 -> 64 extended multiply into a double-word operand. */
1329 #define umul_ppmm(w1, w0, u, v) \
1330 ({union {UDItype __ll; \
1331 struct {USItype __l, __h;} __i; \
1332 } __x; \
1333 __asm__ ("meid %2,%0" \
1334 : "=g" (__x.__ll) \
1335 : "%0" ((USItype)(u)), "g" ((USItype)(v))); \
1336 (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
/* Same meid, returning the raw 64-bit product. */
1337 #define __umulsidi3(u, v) \
1338 ({UDItype __w; \
1339 __asm__ ("meid %2,%0" \
1340 : "=g" (__w) \
1341 : "%0" ((USItype)(u)), "g" ((USItype)(v))); \
1342 __w; })
/* deid: extended divide of the packed n1:n0 pair by d; remainder lands in
   the low word, quotient in the high word. */
1343 #define udiv_qrnnd(q, r, n1, n0, d) \
1344 ({union {UDItype __ll; \
1345 struct {USItype __l, __h;} __i; \
1346 } __x; \
1347 __x.__i.__h = (n1); __x.__i.__l = (n0); \
1348 __asm__ ("deid %2,%0" \
1349 : "=g" (__x.__ll) \
1350 : "0" (__x.__ll), "g" ((USItype)(d))); \
1351 (r) = __x.__i.__l; (q) = __x.__i.__h; })
/* ffsd: find-first-set starting from the seeded zero count. */
1352 #define count_trailing_zeros(count,x) \
1353 do { \
1354 __asm__ ("ffsd %2,%0" \
1355 : "=r" (count) \
1356 : "0" ((USItype) 0), "r" ((USItype) (x))); \
1357 } while (0)
1358 #endif /* __ns32000__ */
1360 /* In the past we had a block of various #defines tested
1361 _ARCH_PPC - AIX
1362 _ARCH_PWR - AIX
1363 __powerpc__ - gcc
1364 __POWERPC__ - BEOS
1365 __ppc__ - Darwin
1366 PPC - old gcc, GNU/Linux, SysV
1367 The plain PPC test was not good for vxWorks, since PPC is defined on all
1368 CPUs there (eg. m68k too), as a constant one is expected to compare
1369 CPU_FAMILY against.
1371 At any rate, this was pretty unattractive and a bit fragile. The use of
1372 HAVE_HOST_CPU_FAMILY is designed to cut through it all and be sure of
1373 getting the desired effect.
1375 ENHANCE-ME: We should test _IBMR2 here when we add assembly support for
1376 the system vendor compilers. (Is that vendor compilers with inline asm,
1377 or what?) */
1379 #if (HAVE_HOST_CPU_FAMILY_power || HAVE_HOST_CPU_FAMILY_powerpc) \
1380 && W_TYPE_SIZE == 32
/* Two-limb add; special-cases constant bh of 0 / ~0 so the high-limb add
   collapses to addze / addme. */
1381 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1382 do { \
1383 if (__builtin_constant_p (bh) && (bh) == 0) \
1384 __asm__ ("add%I4c %1,%3,%4\n\taddze %0,%2" \
1385 : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl) \
1386 __CLOBBER_CC); \
1387 else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
1388 __asm__ ("add%I4c %1,%3,%4\n\taddme %0,%2" \
1389 : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl) \
1390 __CLOBBER_CC); \
1391 else \
1392 __asm__ ("add%I5c %1,%4,%5\n\tadde %0,%2,%3" \
1393 : "=r" (sh), "=&r" (sl) \
1394 : "r" (ah), "r" (bh), "%r" (al), "rI" (bl) \
1395 __CLOBBER_CC); \
1396 } while (0)
/* Two-limb subtract; analogous constant special cases for ah and bh. */
1397 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1398 do { \
1399 if (__builtin_constant_p (ah) && (ah) == 0) \
1400 __asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2" \
1401 : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl) \
1402 __CLOBBER_CC); \
1403 else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
1404 __asm__ ("subf%I3c %1,%4,%3\n\tsubfme %0,%2" \
1405 : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl) \
1406 __CLOBBER_CC); \
1407 else if (__builtin_constant_p (bh) && (bh) == 0) \
1408 __asm__ ("subf%I3c %1,%4,%3\n\taddme %0,%2" \
1409 : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl) \
1410 __CLOBBER_CC); \
1411 else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
1412 __asm__ ("subf%I3c %1,%4,%3\n\taddze %0,%2" \
1413 : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl) \
1414 __CLOBBER_CC); \
1415 else \
1416 __asm__ ("subf%I4c %1,%5,%4\n\tsubfe %0,%3,%2" \
1417 : "=r" (sh), "=&r" (sl) \
1418 : "r" (ah), "r" (bh), "rI" (al), "r" (bl) \
1419 __CLOBBER_CC); \
1420 } while (0)
1421 #define count_leading_zeros(count, x) \
1422 __asm__ ("cntlzw %0,%1" : "=r" (count) : "r" (x))
1423 #define COUNT_LEADING_ZEROS_0 32
1424 #if HAVE_HOST_CPU_FAMILY_powerpc
1425 #if __GMP_GNUC_PREREQ (4,4)
/* Preferred form: let gcc >= 4.4 form the 64-bit product directly. */
1426 #define umul_ppmm(w1, w0, u, v) \
1427 do { \
1428 UDItype __ll = (UDItype)(u) * (v); \
1429 w1 = __ll >> 32; \
1430 w0 = __ll; \
1431 } while (0)
1432 #endif
1433 #if !defined (umul_ppmm)
/* High word by mulhwu, low word in C.
   Fix: the asm inputs referenced the raw macro arguments m0/m1 even
   though the locals __m0/__m1 were already made for exactly this --
   an argument with side effects was evaluated twice.  The 64-bit
   counterpart below already uses the locals. */
#define umul_ppmm(ph, pl, m0, m1) \
  do { \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (__m0), "r" (__m1)); \
    (pl) = __m0 * __m1; \
  } while (0)
1440 #endif
/* Signed high word by mulhw, low word in C.
   Fix: use the locals __m0/__m1 in the asm inputs instead of re-expanding
   the macro arguments, avoiding double evaluation (matches the 64-bit
   smul_ppmm below). */
#define smul_ppmm(ph, pl, m0, m1) \
  do { \
    SItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("mulhw %0,%1,%2" : "=r" (ph) : "%r" (__m0), "r" (__m1)); \
    (pl) = __m0 * __m1; \
  } while (0)
1447 #else
/* Classic POWER: mul writes the high word and leaves the low in MQ ("q"). */
1448 #define smul_ppmm(xh, xl, m0, m1) \
1449 __asm__ ("mul %0,%2,%3" : "=r" (xh), "=q" (xl) : "r" (m0), "r" (m1))
/* Classic POWER divide; nl is bound to the MQ register via "1". */
1450 #define sdiv_qrnnd(q, r, nh, nl, d) \
1451 __asm__ ("div %0,%2,%4" : "=r" (q), "=q" (r) : "r" (nh), "1" (nl), "r" (d))
1452 #endif
1453 #endif /* 32-bit POWER architecture variants. */
1455 /* We should test _IBMR2 here when we add assembly support for the system
1456 vendor compilers. */
1457 #if HAVE_HOST_CPU_FAMILY_powerpc && W_TYPE_SIZE == 64
1458 #if !defined (_LONG_LONG_LIMB)
1459 /* _LONG_LONG_LIMB is ABI=mode32 where adde operates on 32-bit values. So
1460 use adde etc only when not _LONG_LONG_LIMB. */
/* Two-limb add; constant bh of 0 / ~0 reduces the high add to addze/addme. */
1461 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1462 do { \
1463 if (__builtin_constant_p (bh) && (bh) == 0) \
1464 __asm__ ("add%I4c %1,%3,%4\n\taddze %0,%2" \
1465 : "=r" (sh), "=&r" (sl) \
1466 : "r" ((UDItype)(ah)), \
1467 "%r" ((UDItype)(al)), "rI" ((UDItype)(bl)) \
1468 __CLOBBER_CC); \
1469 else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
1470 __asm__ ("add%I4c %1,%3,%4\n\taddme %0,%2" \
1471 : "=r" (sh), "=&r" (sl) \
1472 : "r" ((UDItype)(ah)), \
1473 "%r" ((UDItype)(al)), "rI" ((UDItype)(bl)) \
1474 __CLOBBER_CC); \
1475 else \
1476 __asm__ ("add%I5c %1,%4,%5\n\tadde %0,%2,%3" \
1477 : "=r" (sh), "=&r" (sl) \
1478 : "r" ((UDItype)(ah)), "r" ((UDItype)(bh)), \
1479 "%r" ((UDItype)(al)), "rI" ((UDItype)(bl)) \
1480 __CLOBBER_CC); \
1481 } while (0)
1482 /* We use "*rI" for the constant operand here, since with just "I", gcc barfs.
1483 This might seem strange, but gcc folds away the dead code late. */
/* Two-limb subtract.  When bl is a small nonzero constant the subtraction
   is turned into an addic of -bl; further constant cases on ah/bh shrink
   the high-limb operation.
   Fix: the "} \" line closing the else-branch was missing, leaving the
   braces unbalanced so the macro could not compile. */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    if (__builtin_constant_p (bl) \
	&& (bl) > -0x8000 && (bl) <= 0x8000 && (bl) != 0) { \
      if (__builtin_constant_p (ah) && (ah) == 0) \
	__asm__ ("addic %1,%3,%4\n\tsubfze %0,%2" \
		 : "=r" (sh), "=&r" (sl) \
		 : "r" ((UDItype)(bh)), \
		   "r" ((UDItype)(al)), "*rI" (-((UDItype)(bl))) \
		 __CLOBBER_CC); \
      else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0) \
	__asm__ ("addic %1,%3,%4\n\tsubfme %0,%2" \
		 : "=r" (sh), "=&r" (sl) \
		 : "r" ((UDItype)(bh)), \
		   "r" ((UDItype)(al)), "*rI" (-((UDItype)(bl))) \
		 __CLOBBER_CC); \
      else if (__builtin_constant_p (bh) && (bh) == 0) \
	__asm__ ("addic %1,%3,%4\n\taddme %0,%2" \
		 : "=r" (sh), "=&r" (sl) \
		 : "r" ((UDItype)(ah)), \
		   "r" ((UDItype)(al)), "*rI" (-((UDItype)(bl))) \
		 __CLOBBER_CC); \
      else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
	__asm__ ("addic %1,%3,%4\n\taddze %0,%2" \
		 : "=r" (sh), "=&r" (sl) \
		 : "r" ((UDItype)(ah)), \
		   "r" ((UDItype)(al)), "*rI" (-((UDItype)(bl))) \
		 __CLOBBER_CC); \
      else \
	__asm__ ("addic %1,%4,%5\n\tsubfe %0,%3,%2" \
		 : "=r" (sh), "=&r" (sl) \
		 : "r" ((UDItype)(ah)), "r" ((UDItype)(bh)), \
		   "r" ((UDItype)(al)), "*rI" (-((UDItype)(bl))) \
		 __CLOBBER_CC); \
    } else { \
      if (__builtin_constant_p (ah) && (ah) == 0) \
	__asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2" \
		 : "=r" (sh), "=&r" (sl) \
		 : "r" ((UDItype)(bh)), \
		   "rI" ((UDItype)(al)), "r" ((UDItype)(bl)) \
		 __CLOBBER_CC); \
      else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0) \
	__asm__ ("subf%I3c %1,%4,%3\n\tsubfme %0,%2" \
		 : "=r" (sh), "=&r" (sl) \
		 : "r" ((UDItype)(bh)), \
		   "rI" ((UDItype)(al)), "r" ((UDItype)(bl)) \
		 __CLOBBER_CC); \
      else if (__builtin_constant_p (bh) && (bh) == 0) \
	__asm__ ("subf%I3c %1,%4,%3\n\taddme %0,%2" \
		 : "=r" (sh), "=&r" (sl) \
		 : "r" ((UDItype)(ah)), \
		   "rI" ((UDItype)(al)), "r" ((UDItype)(bl)) \
		 __CLOBBER_CC); \
      else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
	__asm__ ("subf%I3c %1,%4,%3\n\taddze %0,%2" \
		 : "=r" (sh), "=&r" (sl) \
		 : "r" ((UDItype)(ah)), \
		   "rI" ((UDItype)(al)), "r" ((UDItype)(bl)) \
		 __CLOBBER_CC); \
      else \
	__asm__ ("subf%I4c %1,%5,%4\n\tsubfe %0,%3,%2" \
		 : "=r" (sh), "=&r" (sl) \
		 : "r" ((UDItype)(ah)), "r" ((UDItype)(bh)), \
		   "rI" ((UDItype)(al)), "r" ((UDItype)(bl)) \
		 __CLOBBER_CC); \
    } \
  } while (0)
1551 #endif /* ! _LONG_LONG_LIMB */
1552 #define count_leading_zeros(count, x) \
1553 __asm__ ("cntlzd %0,%1" : "=r" (count) : "r" (x))
1554 #define COUNT_LEADING_ZEROS_0 64
1555 #if __GMP_GNUC_PREREQ (4,8)
/* Preferred form: a TImode (128-bit) product split in C. */
1556 #define umul_ppmm(w1, w0, u, v) \
1557 do { \
1558 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
1559 __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \
1560 w1 = __ll >> 64; \
1561 w0 = __ll; \
1562 } while (0)
1563 #endif
1564 #if !defined (umul_ppmm)
/* mulhdu for the high word; the low word multiply is left to C. */
1565 #define umul_ppmm(ph, pl, m0, m1) \
1566 do { \
1567 UDItype __m0 = (m0), __m1 = (m1); \
1568 __asm__ ("mulhdu %0,%1,%2" : "=r" (ph) : "%r" (__m0), "r" (__m1)); \
1569 (pl) = __m0 * __m1; \
1570 } while (0)
1571 #endif
/* Signed variant using mulhd. */
1572 #define smul_ppmm(ph, pl, m0, m1) \
1573 do { \
1574 DItype __m0 = (m0), __m1 = (m1); \
1575 __asm__ ("mulhd %0,%1,%2" : "=r" (ph) : "%r" (__m0), "r" (__m1)); \
1576 (pl) = __m0 * __m1; \
1577 } while (0)
1578 #endif /* 64-bit PowerPC. */
1580 #if defined (__pyr__) && W_TYPE_SIZE == 32
/* Two-limb add: addw then addwc carries into the high limb. */
1581 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1582 __asm__ ("addw %5,%1\n\taddwc %3,%0" \
1583 : "=r" (sh), "=&r" (sl) \
1584 : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
1585 "%1" ((USItype)(al)), "g" ((USItype)(bl)))
/* Two-limb subtract with borrow (subw/subwb). */
1586 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1587 __asm__ ("subw %5,%1\n\tsubwb %3,%0" \
1588 : "=r" (sh), "=&r" (sl) \
1589 : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
1590 "1" ((USItype)(al)), "g" ((USItype)(bl)))
1591 /* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */
1592 #define umul_ppmm(w1, w0, u, v) \
1593 ({union {UDItype __ll; \
1594 struct {USItype __h, __l;} __i; \
1595 } __x; \
1596 __asm__ ("movw %1,%R0\n\tuemul %2,%0" \
1597 : "=&r" (__x.__ll) \
1598 : "g" ((USItype) (u)), "g" ((USItype)(v))); \
1599 (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
1600 #endif /* __pyr__ */
1602 #if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
/* Two-limb add: "a" then "ae" (add-extended) for the carry into the high. */
1603 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1604 __asm__ ("a %1,%5\n\tae %0,%3" \
1605 : "=r" (sh), "=&r" (sl) \
1606 : "0" ((USItype)(ah)), "r" ((USItype)(bh)), \
1607 "%1" ((USItype)(al)), "r" ((USItype)(bl)))
/* Two-limb subtract with borrow ("s" then "se"). */
1608 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1609 __asm__ ("s %1,%5\n\tse %0,%3" \
1610 : "=r" (sh), "=&r" (sl) \
1611 : "0" ((USItype)(ah)), "r" ((USItype)(bh)), \
1612 "1" ((USItype)(al)), "r" ((USItype)(bl)))
/* ROMP has no full multiply: sixteen "m" multiply-step instructions build
   the 32x32 product two bits at a time, accumulating through r2/MQ. */
1613 #define smul_ppmm(ph, pl, m0, m1) \
1614 __asm__ ( \
1615 "s r2,r2\n" \
1616 " mts r10,%2\n" \
1617 " m r2,%3\n" \
1618 " m r2,%3\n" \
1619 " m r2,%3\n" \
1620 " m r2,%3\n" \
1621 " m r2,%3\n" \
1622 " m r2,%3\n" \
1623 " m r2,%3\n" \
1624 " m r2,%3\n" \
1625 " m r2,%3\n" \
1626 " m r2,%3\n" \
1627 " m r2,%3\n" \
1628 " m r2,%3\n" \
1629 " m r2,%3\n" \
1630 " m r2,%3\n" \
1631 " m r2,%3\n" \
1632 " m r2,%3\n" \
1633 " cas %0,r2,r0\n" \
1634 " mfs r10,%1" \
1635 : "=r" (ph), "=r" (pl) \
1636 : "%r" ((USItype)(m0)), "r" ((USItype)(m1)) \
1637 : "r2")
/* clz handles 16 bits at a time: count directly off the high half when it
   is nonzero, otherwise count the low half and add 16.
   Fix: the braces around the else-branch were missing, so
   "(count) += 16;" executed on BOTH paths, giving wrong results whenever
   x >= 0x10000. */
#define count_leading_zeros(count, x) \
  do { \
    if ((x) >= 0x10000) \
      __asm__ ("clz %0,%1" \
	       : "=r" (count) : "r" ((USItype)(x) >> 16)); \
    else \
      { \
	__asm__ ("clz %0,%1" \
		 : "=r" (count) : "r" ((USItype)(x))); \
	(count) += 16; \
      } \
  } while (0)
1650 #endif /* RT/ROMP */
1652 #if defined (__riscv) && defined (__riscv_mul) && W_TYPE_SIZE == 64
/* Low word by plain multiply, high word via mulhu; locals guard against
   double evaluation of the macro arguments. */
1653 #define umul_ppmm(ph, pl, u, v) \
1654 do { \
1655 UDItype __u = (u), __v = (v); \
1656 (pl) = __u * __v; \
1657 __asm__ ("mulhu\t%0, %1, %2" : "=r" (ph) : "%r" (__u), "r" (__v)); \
1658 } while (0)
1659 #endif
1661 #if (defined (__SH2__) || defined (__SH3__) || defined (__SH4__)) && W_TYPE_SIZE == 32
/* dmulu.l leaves the 64-bit product in MACH:MACL, moved out with sts. */
1662 #define umul_ppmm(w1, w0, u, v) \
1663 __asm__ ("dmulu.l %2,%3\n\tsts macl,%1\n\tsts mach,%0" \
1664 : "=r" (w1), "=r" (w0) : "r" (u), "r" (v) : "macl", "mach")
1665 #endif
1667 #if defined (__sparc__) && W_TYPE_SIZE == 32
/* Two-limb add: addcc sets the carry, addx consumes it. */
1668 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1669 __asm__ ("addcc %r4,%5,%1\n\taddx %r2,%3,%0" \
1670 : "=r" (sh), "=&r" (sl) \
1671 : "rJ" (ah), "rI" (bh),"%rJ" (al), "rI" (bl) \
1672 __CLOBBER_CC)
/* Two-limb subtract with borrow (subcc/subx). */
1673 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1674 __asm__ ("subcc %r4,%5,%1\n\tsubx %r2,%3,%0" \
1675 : "=r" (sh), "=&r" (sl) \
1676 : "rJ" (ah), "rI" (bh), "rJ" (al), "rI" (bl) \
1677 __CLOBBER_CC)
1678 /* FIXME: When gcc -mcpu=v9 is used on solaris, gcc/config/sol2-sld-64.h
1679 doesn't define anything to indicate that to us, it only sets __sparcv8. */
1680 #if defined (__sparc_v9__) || defined (__sparcv9)
1681 /* Perhaps we should use floating-point operations here? */
1682 #if 0
1683 /* Triggers a bug making mpz/tests/t-gcd.c fail.
1684 Perhaps we simply need explicitly zero-extend the inputs? */
1685 #define umul_ppmm(w1, w0, u, v) \
1686 __asm__ ("mulx %2,%3,%%g1; srl %%g1,0,%1; srlx %%g1,32,%0" : \
1687 "=r" (w1), "=r" (w0) : "r" (u), "r" (v) : "g1")
1688 #else
1689 /* Use v8 umul until above bug is fixed. */
1690 #define umul_ppmm(w1, w0, u, v) \
1691 __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
1692 #endif
1693 /* Use a plain v8 divide for v9. */
/* wr %y takes effect after a few cycles, hence the three nops before udiv. */
1694 #define udiv_qrnnd(q, r, n1, n0, d) \
1695 do { \
1696 USItype __q; \
1697 __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \
1698 : "=r" (__q) : "r" (n1), "r" (n0), "r" (d)); \
1699 (r) = (n0) - __q * (d); \
1700 (q) = __q; \
1701 } while (0)
1702 #else
1703 #if defined (__sparc_v8__) /* gcc normal */ \
1704 || defined (__sparcv8) /* gcc solaris */ \
1705 || HAVE_HOST_CPU_supersparc
1706 /* Don't match immediate range because, 1) it is not often useful,
1707 2) the 'I' flag thinks of the range as a 13 bit signed interval,
1708 while we want to match a 13 bit interval, sign extended to 32 bits,
1709 but INTERPRETED AS UNSIGNED. */
1710 #define umul_ppmm(w1, w0, u, v) \
1711 __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
1713 #if HAVE_HOST_CPU_supersparc
1714 #else
1715 /* Don't use this on SuperSPARC because its udiv only handles 53 bit
1716 dividends and will trap to the kernel for the rest. */
1717 #define udiv_qrnnd(q, r, n1, n0, d) \
1718 do { \
1719 USItype __q; \
1720 __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \
1721 : "=r" (__q) : "r" (n1), "r" (n0), "r" (d)); \
1722 (r) = (n0) - __q * (d); \
1723 (q) = __q; \
1724 } while (0)
1725 #endif /* HAVE_HOST_CPU_supersparc */
1727 #else /* ! __sparc_v8__ */
1728 #if defined (__sparclite__)
1729 /* This has hardware multiply but not divide. It also has two additional
1730 instructions scan (ffs from high bit) and divscc. */
1731 #define umul_ppmm(w1, w0, u, v) \
1732 __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
/* One divscc per quotient bit: 32 steps of non-restoring division, with a
   final remainder correction when the last step left it negative. */
1733 #define udiv_qrnnd(q, r, n1, n0, d) \
1734 __asm__ ("! Inlined udiv_qrnnd\n" \
1735 " wr %%g0,%2,%%y ! Not a delayed write for sparclite\n" \
1736 " tst %%g0\n" \
1737 " divscc %3,%4,%%g1\n" \
1738 " divscc %%g1,%4,%%g1\n" \
1739 " divscc %%g1,%4,%%g1\n" \
1740 " divscc %%g1,%4,%%g1\n" \
1741 " divscc %%g1,%4,%%g1\n" \
1742 " divscc %%g1,%4,%%g1\n" \
1743 " divscc %%g1,%4,%%g1\n" \
1744 " divscc %%g1,%4,%%g1\n" \
1745 " divscc %%g1,%4,%%g1\n" \
1746 " divscc %%g1,%4,%%g1\n" \
1747 " divscc %%g1,%4,%%g1\n" \
1748 " divscc %%g1,%4,%%g1\n" \
1749 " divscc %%g1,%4,%%g1\n" \
1750 " divscc %%g1,%4,%%g1\n" \
1751 " divscc %%g1,%4,%%g1\n" \
1752 " divscc %%g1,%4,%%g1\n" \
1753 " divscc %%g1,%4,%%g1\n" \
1754 " divscc %%g1,%4,%%g1\n" \
1755 " divscc %%g1,%4,%%g1\n" \
1756 " divscc %%g1,%4,%%g1\n" \
1757 " divscc %%g1,%4,%%g1\n" \
1758 " divscc %%g1,%4,%%g1\n" \
1759 " divscc %%g1,%4,%%g1\n" \
1760 " divscc %%g1,%4,%%g1\n" \
1761 " divscc %%g1,%4,%%g1\n" \
1762 " divscc %%g1,%4,%%g1\n" \
1763 " divscc %%g1,%4,%%g1\n" \
1764 " divscc %%g1,%4,%%g1\n" \
1765 " divscc %%g1,%4,%%g1\n" \
1766 " divscc %%g1,%4,%%g1\n" \
1767 " divscc %%g1,%4,%%g1\n" \
1768 " divscc %%g1,%4,%0\n" \
1769 " rd %%y,%1\n" \
1770 " bl,a 1f\n" \
1771 " add %1,%4,%1\n" \
1772 "1: ! End of inline udiv_qrnnd" \
1773 : "=r" (q), "=r" (r) : "r" (n1), "r" (n0), "rI" (d) \
1774 : "%g1" __AND_CLOBBER_CC)
1775 #define count_leading_zeros(count, x) \
1776 __asm__ ("scan %1,1,%0" : "=r" (count) : "r" (x))
1777 /* Early sparclites return 63 for an argument of 0, but they warn that future
1778 implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0
1779 undefined. */
1780 #endif /* __sparclite__ */
1781 #endif /* __sparc_v8__ */
1782 #endif /* __sparc_v9__ */
/* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */
#ifndef umul_ppmm
/* v7 has no multiply instruction: 32 mulscc one-bit steps accumulate the
   product, plus a final zero step.  The sra/and pair builds
   %g2 = u & (v >> 31), added to the high word to correct for mulscc's
   signed treatment of the %3 operand; the low word is read from %y.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("! Inlined umul_ppmm\n" \
" wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n" \
" sra %3,31,%%g2 ! Don't move this insn\n" \
" and %2,%%g2,%%g2 ! Don't move this insn\n" \
" andcc %%g0,0,%%g1 ! Don't move this insn\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,%3,%%g1\n" \
" mulscc %%g1,0,%%g1\n" \
" add %%g1,%%g2,%0\n" \
" rd %%y,%1" \
 : "=r" (w1), "=r" (w0) : "%rI" (u), "r" (v) \
 : "%g1", "%g2" __AND_CLOBBER_CC)
#endif
#ifndef udiv_qrnnd
#ifndef LONGLONG_STANDALONE
/* No usable divide instruction: call the out-of-line __MPN(udiv_qrnnd).  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  do { UWtype __r; \
    (q) = __MPN(udiv_qrnnd) (&__r, (n1), (n0), (d)); \
    (r) = __r; \
  } while (0)
extern UWtype __MPN(udiv_qrnnd) (UWtype *, UWtype, UWtype, UWtype);
#endif /* LONGLONG_STANDALONE */
#endif /* udiv_qrnnd */
#endif /* __sparc__ */
#if defined (__sparc__) && W_TYPE_SIZE == 64
/* 64-bit sparc without VIS3: the carry out of the low 64-bit add is
   re-derived from the high 32-bit halves of al and bl (operands %r6/%7)
   by the middle addccc, whose sum is discarded into %%g0.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ( \
       "addcc %r4,%5,%1\n" \
      " addccc %r6,%7,%%g0\n" \
      " addc %r2,%3,%0" \
       : "=r" (sh), "=&r" (sl) \
       : "rJ" ((UDItype)(ah)), "rI" ((UDItype)(bh)), \
	 "%rJ" ((UDItype)(al)), "rI" ((UDItype)(bl)), \
	 "%rJ" ((UDItype)(al) >> 32), "rI" ((UDItype)(bl) >> 32) \
	   __CLOBBER_CC)
/* Same trick for the borrow in two-word subtraction.  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ( \
       "subcc %r4,%5,%1\n" \
      " subccc %r6,%7,%%g0\n" \
      " subc %r2,%3,%0" \
       : "=r" (sh), "=&r" (sl) \
       : "rJ" ((UDItype)(ah)), "rI" ((UDItype)(bh)), \
	 "rJ" ((UDItype)(al)), "rI" ((UDItype)(bl)), \
	 "rJ" ((UDItype)(al) >> 32), "rI" ((UDItype)(bl) >> 32) \
	   __CLOBBER_CC)
#if __VIS__ >= 0x300
/* VIS 3.0 addxc consumes the carry directly: two instructions suffice.  */
#undef add_ssaaaa
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ( \
       "addcc %r4, %5, %1\n" \
      " addxc %r2, %r3, %0" \
	  : "=r" (sh), "=&r" (sl) \
       : "rJ" ((UDItype)(ah)), "rJ" ((UDItype)(bh)), \
	 "%rJ" ((UDItype)(al)), "rI" ((UDItype)(bl)) __CLOBBER_CC)
/* umulxhi supplies the high 64 bits; the low word is a plain C multiply. */
#define umul_ppmm(ph, pl, m0, m1) \
  do { \
    UDItype __m0 = (m0), __m1 = (m1); \
    (pl) = __m0 * __m1; \
    __asm__ ("umulxhi\t%2, %1, %0" \
	     : "=r" (ph) \
	     : "%r" (__m0), "r" (__m1)); \
  } while (0)
#define count_leading_zeros(count, x) \
  __asm__ ("lzd\t%1,%0" : "=r" (count) : "r" (x))
/* Needed by count_leading_zeros_32 in sparc64.h. */
#define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
#endif
#endif
#if (defined (__vax) || defined (__vax__)) && W_TYPE_SIZE == 32
/* addl2/adwc: 32-bit add, then add-with-carry into the high word.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl2 %5,%1\n\tadwc %3,%0" \
	   : "=g" (sh), "=&g" (sl) \
	   : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl2 %5,%1\n\tsbwc %3,%0" \
	   : "=g" (sh), "=&g" (sl) \
	   : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
	     "1" ((USItype)(al)), "g" ((USItype)(bl)))
/* emul: signed 32x32->64 multiply; the double-word result is split
   through a union.  */
#define smul_ppmm(xh, xl, m0, m1) \
  do { \
    union {UDItype __ll; \
	   struct {USItype __l, __h;} __i; \
	  } __x; \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("emul %1,%2,$0,%0" \
	     : "=g" (__x.__ll) : "g" (__m0), "g" (__m1)); \
    (xh) = __x.__i.__h; (xl) = __x.__i.__l; \
  } while (0)
/* ediv: signed 64/32 divide of n1:n0 (assembled in the union) by d.  */
#define sdiv_qrnnd(q, r, n1, n0, d) \
  do { \
    union {DItype __ll; \
	   struct {SItype __l, __h;} __i; \
	  } __x; \
    __x.__i.__h = n1; __x.__i.__l = n0; \
    __asm__ ("ediv %3,%2,%0,%1" \
	     : "=g" (q), "=g" (r) : "g" (__x.__ll), "g" (d)); \
  } while (0)
#if 0
/* FIXME: This instruction appears to be unimplemented on some systems (vax
   8800 maybe). */
#define count_trailing_zeros(count,x) \
  do { \
    __asm__ ("ffs 0, 31, %1, %0" \
	     : "=g" (count) \
	     : "g" ((USItype) (x))); \
  } while (0)
#endif
#endif /* vax */
#if defined (__z8000__) && W_TYPE_SIZE == 16
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \
	   : "=r" (sh), "=&r" (sl) \
	   : "0" ((unsigned int)(ah)), "r" ((unsigned int)(bh)), \
	     "%1" ((unsigned int)(al)), "rQR" ((unsigned int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \
	   : "=r" (sh), "=&r" (sl) \
	   : "0" ((unsigned int)(ah)), "r" ((unsigned int)(bh)), \
	     "1" ((unsigned int)(al)), "rQR" ((unsigned int)(bl)))
/* mult appears to be a signed 16x16->32 multiply: the two masked
   additions at the end convert the signed high word to the unsigned
   one (same correction as the generic umul-from-smul macro).  */
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    union {long int __ll; \
	   struct {unsigned int __h, __l;} __i; \
	  } __x; \
    unsigned int __m0 = (m0), __m1 = (m1); \
    __asm__ ("mult %S0,%H3" \
	     : "=r" (__x.__i.__h), "=r" (__x.__i.__l) \
	     : "%1" (m0), "rQR" (m1)); \
    (xh) = __x.__i.__h; (xl) = __x.__i.__l; \
    (xh) += ((((signed int) __m0 >> 15) & __m1) \
	     + (((signed int) __m1 >> 15) & __m0)); \
  } while (0)
#endif /* __z8000__ */

#endif /* __GNUC__ */

#endif /* NO_ASM */
/* FIXME: "sidi" here is highly doubtful, should sometimes be "diti". */
#if !defined (umul_ppmm) && defined (__umulsidi3)
/* Build umul_ppmm from a full double-word multiply, splitting the
   product into its high and low words.  */
#define umul_ppmm(ph, pl, m0, m1) \
  do { \
    UDWtype __ll = __umulsidi3 (m0, m1); \
    ph = (UWtype) (__ll >> W_TYPE_SIZE); \
    pl = (UWtype) __ll; \
  } while (0)
#endif

#if !defined (__umulsidi3)
/* Conversely, build __umulsidi3 from umul_ppmm (GNU statement expression).  */
#define __umulsidi3(u, v) \
  ({UWtype __hi, __lo; \
    umul_ppmm (__hi, __lo, u, v); \
    ((UDWtype) __hi << W_TYPE_SIZE) | __lo; })
#endif
/* Linkage prefix for the extern declarations below: gives the mpn_*
   symbols "C" linkage when this header is compiled as C++.  */
#if defined (__cplusplus)
#define __longlong_h_C "C"
#else
#define __longlong_h_C
#endif
1983 /* Use mpn_umul_ppmm or mpn_udiv_qrnnd functions, if they exist. The "_r"
1984 forms have "reversed" arguments, meaning the pointer is last, which
1985 sometimes allows better parameter passing, in particular on 64-bit
1986 hppa. */
1988 #define mpn_umul_ppmm __MPN(umul_ppmm)
1989 extern __longlong_h_C UWtype mpn_umul_ppmm (UWtype *, UWtype, UWtype);
1991 #if ! defined (umul_ppmm) && HAVE_NATIVE_mpn_umul_ppmm \
1992 && ! defined (LONGLONG_STANDALONE)
1993 #define umul_ppmm(wh, wl, u, v) \
1994 do { \
1995 UWtype __umul_ppmm__p0; \
1996 (wh) = mpn_umul_ppmm (&__umul_ppmm__p0, (UWtype) (u), (UWtype) (v));\
1997 (wl) = __umul_ppmm__p0; \
1998 } while (0)
1999 #endif
2001 #define mpn_umul_ppmm_r __MPN(umul_ppmm_r)
2002 extern __longlong_h_C UWtype mpn_umul_ppmm_r (UWtype, UWtype, UWtype *);
2004 #if ! defined (umul_ppmm) && HAVE_NATIVE_mpn_umul_ppmm_r \
2005 && ! defined (LONGLONG_STANDALONE)
2006 #define umul_ppmm(wh, wl, u, v) \
2007 do { \
2008 UWtype __umul_p0; \
2009 (wh) = mpn_umul_ppmm_r ((UWtype) (u), (UWtype) (v), &__umul_p0); \
2010 (wl) = __umul_p0; \
2011 } while (0)
2012 #endif
2014 #define mpn_udiv_qrnnd __MPN(udiv_qrnnd)
2015 extern __longlong_h_C UWtype mpn_udiv_qrnnd (UWtype *, UWtype, UWtype, UWtype);
2017 #if ! defined (udiv_qrnnd) && HAVE_NATIVE_mpn_udiv_qrnnd \
2018 && ! defined (LONGLONG_STANDALONE)
2019 #define udiv_qrnnd(q, r, n1, n0, d) \
2020 do { \
2021 UWtype __udiv_qrnnd_r; \
2022 (q) = mpn_udiv_qrnnd (&__udiv_qrnnd_r, \
2023 (UWtype) (n1), (UWtype) (n0), (UWtype) d); \
2024 (r) = __udiv_qrnnd_r; \
2025 } while (0)
2026 #endif
2028 #define mpn_udiv_qrnnd_r __MPN(udiv_qrnnd_r)
2029 extern __longlong_h_C UWtype mpn_udiv_qrnnd_r (UWtype, UWtype, UWtype, UWtype *);
2031 #if ! defined (udiv_qrnnd) && HAVE_NATIVE_mpn_udiv_qrnnd_r \
2032 && ! defined (LONGLONG_STANDALONE)
2033 #define udiv_qrnnd(q, r, n1, n0, d) \
2034 do { \
2035 UWtype __udiv_qrnnd_r; \
2036 (q) = mpn_udiv_qrnnd_r ((UWtype) (n1), (UWtype) (n0), (UWtype) d, \
2037 &__udiv_qrnnd_r); \
2038 (r) = __udiv_qrnnd_r; \
2039 } while (0)
2040 #endif
/* If this machine has no inline assembler, use C macros. */

/* Generic two-word addition: (sh,sl) = (ah,al) + (bh,bl).  The carry out
   of the low-word sum is detected by the unsigned-wraparound test
   (sum < addend) and folded into the high word.  */
#if !defined (add_ssaaaa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    UWtype __a0 = (al); \
    UWtype __b0 = (bl); \
    UWtype __s0 = __a0 + __b0; \
    (sh) = (ah) + (bh) + (UWtype) (__s0 < __a0); \
    (sl) = __s0; \
  } while (0)
#endif
/* Generic two-word subtraction: (sh,sl) = (ah,al) - (bh,bl).  A borrow
   occurs exactly when al < bl; it is subtracted from the high word.  */
#if !defined (sub_ddmmss)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    UWtype __a0 = (al); \
    UWtype __b0 = (bl); \
    UWtype __d0 = __a0 - __b0; \
    (sh) = (ah) - (bh) - (UWtype) (__a0 < __b0); \
    (sl) = __d0; \
  } while (0)
#endif
/* If we lack umul_ppmm but have smul_ppmm, define umul_ppmm in terms of
   smul_ppmm. */
#if !defined (umul_ppmm) && defined (smul_ppmm)
/* Unsigned high word = signed high word + (u negative ? v : 0)
   + (v negative ? u : 0).  The -(x >> (W_TYPE_SIZE-1)) terms are
   all-zeros or all-ones masks built from the sign bits.  */
#define umul_ppmm(w1, w0, u, v) \
  do { \
    UWtype __w1; \
    UWtype __xm0 = (u), __xm1 = (v); \
    smul_ppmm (__w1, w0, __xm0, __xm1); \
    (w1) = __w1 + (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1) \
	+ (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0); \
  } while (0)
#endif
/* Final fallback: synthesize umul_ppmm in portable C from four
   half-word multiplies.

   For reference, when u and v are identical expressions (squaring), gcc
   notices that the two cross products are equal and emits only three
   multiplies instead of four.  The remaining additions could be trimmed
   further, but GMP only squares this way in mpn_sqr_basecase, and chips
   forced onto this generic path have far bigger performance problems
   than a couple of extra adds on the diagonal.  */
#if !defined (umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
  do { \
    UWtype __lo_lo, __lo_hi, __hi_lo, __hi_hi; \
    UHWtype __u0, __u1, __v0, __v1; \
    UWtype __uu = (u), __vv = (v); \
 \
    __u0 = __ll_lowpart (__uu); \
    __u1 = __ll_highpart (__uu); \
    __v0 = __ll_lowpart (__vv); \
    __v1 = __ll_highpart (__vv); \
 \
    __lo_lo = (UWtype) __u0 * __v0; \
    __lo_hi = (UWtype) __u0 * __v1; \
    __hi_lo = (UWtype) __u1 * __v0; \
    __hi_hi = (UWtype) __u1 * __v1; \
 \
    __lo_hi += __ll_highpart (__lo_lo); /* this cannot carry */ \
    __lo_hi += __hi_lo;                 /* but this sum can */ \
    if (__lo_hi < __hi_lo)              /* carry out? */ \
      __hi_hi += __ll_B;                /* fold it into the high word */ \
 \
    (w1) = __hi_hi + __ll_highpart (__lo_hi); \
    (w0) = (__lo_hi << W_TYPE_SIZE/2) + __ll_lowpart (__lo_lo); \
  } while (0)
#endif
/* If we don't have smul_ppmm, define it using umul_ppmm (which surely will
   exist in one form or another). */
#if !defined (smul_ppmm)
/* Signed high word = unsigned high word - (u negative ? v : 0)
   - (v negative ? u : 0); the inverse of the umul-from-smul correction.  */
#define smul_ppmm(w1, w0, u, v) \
  do { \
    UWtype __w1; \
    UWtype __xm0 = (u), __xm1 = (v); \
    umul_ppmm (__w1, w0, __xm0, __xm1); \
    (w1) = __w1 - (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1) \
	- (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0); \
  } while (0)
#endif
/* Define this unconditionally, so it can be used for debugging. */
/* Schoolbook division of the two-word dividend n1:n0 by d, producing a
   one-word quotient q and remainder r.  Preconditions (asserted): d != 0
   and n1 < d; callers must also pre-shift d so its high bit is set --
   this is why UDIV_NEEDS_NORMALIZATION is 1 when this macro is the
   udiv_qrnnd fallback.  Each half proceeds: trial quotient digit from
   the high half of d, then up to two corrective decrements.  */
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
  do { \
    UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \
 \
    ASSERT ((d) != 0); \
    ASSERT ((n1) < (d)); \
 \
    __d1 = __ll_highpart (d); \
    __d0 = __ll_lowpart (d); \
 \
    __q1 = (n1) / __d1; \
    __r1 = (n1) - __q1 * __d1; \
    __m = __q1 * __d0; \
    __r1 = __r1 * __ll_B | __ll_highpart (n0); \
    if (__r1 < __m) \
      { \
	__q1--, __r1 += (d); \
	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
	  if (__r1 < __m) \
	    __q1--, __r1 += (d); \
      } \
    __r1 -= __m; \
 \
    __q0 = __r1 / __d1; \
    __r0 = __r1 - __q0 * __d1; \
    __m = __q0 * __d0; \
    __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
    if (__r0 < __m) \
      { \
	__q0--, __r0 += (d); \
	if (__r0 >= (d)) \
	  if (__r0 < __m) \
	    __q0--, __r0 += (d); \
      } \
    __r0 -= __m; \
 \
    (q) = __q1 * __ll_B | __q0; \
    (r) = __r0; \
  } while (0)
/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
   __udiv_w_sdiv (defined in libgcc or elsewhere). */
#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd) \
     && ! defined (LONGLONG_STANDALONE)
/* Out-of-line helper: quotient returned, remainder stored through __r.  */
#define udiv_qrnnd(q, r, nh, nl, d) \
  do { \
    UWtype __r; \
    (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \
    (r) = __r; \
  } while (0)
__GMP_DECLSPEC UWtype __MPN(udiv_w_sdiv) (UWtype *, UWtype, UWtype, UWtype);
#endif
/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
#if !defined (udiv_qrnnd)
/* The generic C division needs a pre-shifted (normalized) divisor,
   which is what this flag signals to callers.  */
#define UDIV_NEEDS_NORMALIZATION 1
#define udiv_qrnnd __udiv_qrnnd_c
#endif
#if !defined (count_leading_zeros)
/* Generic count_leading_zeros: locate an 8-bit window holding the top
   set bit (a branch tree when W_TYPE_SIZE == 32, a byte-wise loop
   otherwise -- note the braces around each arm, so that ++__a applies
   only to the loop case), then finish with the __clz_tab lookup.  */
#define count_leading_zeros(count, x) \
  do { \
    UWtype __xr = (x); \
    UWtype __a; \
 \
    if (W_TYPE_SIZE == 32) \
      { \
	__a = __xr < ((UWtype) 1 << 2*__BITS4) \
	  ? (__xr < ((UWtype) 1 << __BITS4) ? 1 : __BITS4 + 1) \
	  : (__xr < ((UWtype) 1 << 3*__BITS4) ? 2*__BITS4 + 1 \
	  : 3*__BITS4 + 1); \
      } \
    else \
      { \
	for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \
	  if (((__xr >> __a) & 0xff) != 0) \
	    break; \
	++__a; \
      } \
 \
    (count) = W_TYPE_SIZE + 1 - __a - __clz_tab[__xr >> __a]; \
  } while (0)
/* This version gives a well-defined value for zero. */
#define COUNT_LEADING_ZEROS_0 (W_TYPE_SIZE - 1)
#define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
#define COUNT_LEADING_ZEROS_SLOW
#endif
/* clz_tab needed by mpn/x86/pentium/mod_1.asm in a fat binary */
#if HAVE_HOST_CPU_FAMILY_x86 && WANT_FAT_BINARY
#define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
#endif

/* 129-entry lookup table (indices 0..128) used by the generic
   count_leading_zeros / count_trailing_zeros macros.  */
#ifdef COUNT_LEADING_ZEROS_NEED_CLZ_TAB
extern const unsigned char __GMP_DECLSPEC __clz_tab[129];
#endif
#if !defined (count_trailing_zeros)
#if !defined (COUNT_LEADING_ZEROS_SLOW)
/* Define count_trailing_zeros using an asm count_leading_zeros. */
/* Isolate the lowest set bit with x & -x, then convert its leading-zero
   count into a bit index.  Input must be nonzero (asserted).  */
#define count_trailing_zeros(count, x) \
  do { \
    UWtype __ctz_x = (x); \
    UWtype __ctz_c; \
    ASSERT (__ctz_x != 0); \
    count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x); \
    (count) = W_TYPE_SIZE - 1 - __ctz_c; \
  } while (0)
#else
/* Define count_trailing_zeros in plain C, assuming small counts are common.
   We use clz_tab without ado, since the C count_leading_zeros above will have
   pulled it in. */
/* __clz_tab[b] holds (index of highest set bit of b) + 2, hence the -2
   adjustment in the fast path and the 8 - 2 loop start.  The else arm
   and the loop body are braced so that only the loop shifts __ctz_x.  */
#define count_trailing_zeros(count, x) \
  do { \
    UWtype __ctz_x = (x); \
    int __ctz_c; \
 \
    if (LIKELY ((__ctz_x & 0xff) != 0)) \
      (count) = __clz_tab[__ctz_x & -__ctz_x] - 2; \
    else \
      { \
	for (__ctz_c = 8 - 2; __ctz_c < W_TYPE_SIZE - 2; __ctz_c += 8) \
	  { \
	    __ctz_x >>= 8; \
	    if (LIKELY ((__ctz_x & 0xff) != 0)) \
	      break; \
	  } \
 \
	(count) = __ctz_c + __clz_tab[__ctz_x & -__ctz_x]; \
      } \
  } while (0)
#endif
#endif
/* Default: the selected udiv_qrnnd does not require a pre-shifted
   divisor (only __udiv_qrnnd_c sets this to 1 above).  */
#ifndef UDIV_NEEDS_NORMALIZATION
#define UDIV_NEEDS_NORMALIZATION 0
#endif

/* Whether udiv_qrnnd is actually implemented with udiv_qrnnd_preinv, and
   that hence the latter should always be used. */
#ifndef UDIV_PREINV_ALWAYS
#define UDIV_PREINV_ALWAYS 0
#endif