1 /*---------------------------------------------------------------------------
3 * Ryu floating-point output for double precision.
5 * Portions Copyright (c) 2018-2024, PostgreSQL Global Development Group
8 * src/common/d2s_intrinsics.h
10 * This is a modification of code taken from github.com/ulfjack/ryu under the
11 * terms of the Boost license (not the Apache license). The original copyright
14 * Copyright 2018 Ulf Adams
16 * The contents of this file may be used under the terms of the Apache
17 * License, Version 2.0.
19 * (See accompanying file LICENSE-Apache or copy at
20 * http://www.apache.org/licenses/LICENSE-2.0)
22 * Alternatively, the contents of this file may be used under the terms of the
23 * Boost Software License, Version 1.0.
25 * (See accompanying file LICENSE-Boost or copy at
26 * https://www.boost.org/LICENSE_1_0.txt)
28 * Unless required by applicable law or agreed to in writing, this software is
29 * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
30 * KIND, either express or implied.
 *---------------------------------------------------------------------------
 */
34 #ifndef RYU_D2S_INTRINSICS_H
35 #define RYU_D2S_INTRINSICS_H
37 #if defined(HAS_64_BIT_INTRINSICS)
42 umul128(const uint64 a
, const uint64 b
, uint64
*const productHi
)
44 return _umul128(a
, b
, productHi
);
48 shiftright128(const uint64 lo
, const uint64 hi
, const uint32 dist
)
51 * For the __shiftright128 intrinsic, the shift value is always modulo 64.
52 * In the current implementation of the double-precision version of Ryu,
53 * the shift value is always < 64. (In the case RYU_OPTIMIZE_SIZE == 0,
54 * the shift value is in the range [49, 58]. Otherwise in the range [2,
55 * 59].) Check this here in case a future change requires larger shift
56 * values. In this case this function needs to be adjusted.
59 return __shiftright128(lo
, hi
, (unsigned char) dist
);
62 #else /* defined(HAS_64_BIT_INTRINSICS) */
65 umul128(const uint64 a
, const uint64 b
, uint64
*const productHi
)
68 * The casts here help MSVC to avoid calls to the __allmul library
71 const uint32 aLo
= (uint32
) a
;
72 const uint32 aHi
= (uint32
) (a
>> 32);
73 const uint32 bLo
= (uint32
) b
;
74 const uint32 bHi
= (uint32
) (b
>> 32);
76 const uint64 b00
= (uint64
) aLo
* bLo
;
77 const uint64 b01
= (uint64
) aLo
* bHi
;
78 const uint64 b10
= (uint64
) aHi
* bLo
;
79 const uint64 b11
= (uint64
) aHi
* bHi
;
81 const uint32 b00Lo
= (uint32
) b00
;
82 const uint32 b00Hi
= (uint32
) (b00
>> 32);
84 const uint64 mid1
= b10
+ b00Hi
;
85 const uint32 mid1Lo
= (uint32
) (mid1
);
86 const uint32 mid1Hi
= (uint32
) (mid1
>> 32);
88 const uint64 mid2
= b01
+ mid1Lo
;
89 const uint32 mid2Lo
= (uint32
) (mid2
);
90 const uint32 mid2Hi
= (uint32
) (mid2
>> 32);
92 const uint64 pHi
= b11
+ mid1Hi
+ mid2Hi
;
93 const uint64 pLo
= ((uint64
) mid2Lo
<< 32) + b00Lo
;
100 shiftright128(const uint64 lo
, const uint64 hi
, const uint32 dist
)
102 /* We don't need to handle the case dist >= 64 here (see above). */
104 #if !defined(RYU_32_BIT_PLATFORM)
106 return (hi
<< (64 - dist
)) | (lo
>> dist
);
108 /* Avoid a 64-bit shift by taking advantage of the range of shift values. */
110 return (hi
<< (64 - dist
)) | ((uint32
) (lo
>> 32) >> (dist
- 32));
#endif							/* defined(HAS_64_BIT_INTRINSICS) */
116 #ifdef RYU_32_BIT_PLATFORM
118 /* Returns the high 64 bits of the 128-bit product of a and b. */
120 umulh(const uint64 a
, const uint64 b
)
123 * Reuse the umul128 implementation. Optimizers will likely eliminate the
124 * instructions used to compute the low part of the product.
/*
 * On 32-bit platforms, compilers typically generate calls to library
 * functions for 64-bit divisions, even if the divisor is a constant.
 *
 * E.g.:
 * https://bugs.llvm.org/show_bug.cgi?id=37932
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=17958
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=37443
 *
 * The functions here perform division-by-constant using multiplications
 * in the same way as 64-bit compilers would do.
 *
 * NB:
 * The multipliers and shift values are the ones generated by clang x64
 * for expressions like x/5, x/10, etc.
 */
153 return umulh(x
, UINT64CONST(0xCCCCCCCCCCCCCCCD)) >> 2;
157 div10(const uint64 x
)
159 return umulh(x
, UINT64CONST(0xCCCCCCCCCCCCCCCD)) >> 3;
163 div100(const uint64 x
)
165 return umulh(x
>> 2, UINT64CONST(0x28F5C28F5C28F5C3)) >> 2;
169 div1e8(const uint64 x
)
171 return umulh(x
, UINT64CONST(0xABCC77118461CEFD)) >> 26;
174 #else /* RYU_32_BIT_PLATFORM */
183 div10(const uint64 x
)
189 div100(const uint64 x
)
195 div1e8(const uint64 x
)
197 return x
/ 100000000;
200 #endif /* RYU_32_BIT_PLATFORM */
202 #endif /* RYU_D2S_INTRINSICS_H */