jsonpath scanner: reentrant scanner
[pgsql.git] / src / common / d2s_intrinsics.h
blobcd1b88963d3f87faa0aa6c6b789b84667fe9756b
/*---------------------------------------------------------------------------
 *
 * Ryu floating-point output for double precision.
 *
 * Portions Copyright (c) 2018-2024, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *	  src/common/d2s_intrinsics.h
 *
 * This is a modification of code taken from github.com/ulfjack/ryu under the
 * terms of the Boost license (not the Apache license). The original copyright
 * notice follows:
 *
 * Copyright 2018 Ulf Adams
 *
 * The contents of this file may be used under the terms of the Apache
 * License, Version 2.0.
 *
 * (See accompanying file LICENSE-Apache or copy at
 *  http://www.apache.org/licenses/LICENSE-2.0)
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * Boost Software License, Version 1.0.
 *
 * (See accompanying file LICENSE-Boost or copy at
 *  https://www.boost.org/LICENSE_1_0.txt)
 *
 * Unless required by applicable law or agreed to in writing, this software is
 * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.
 *
 *---------------------------------------------------------------------------
 */
34 #ifndef RYU_D2S_INTRINSICS_H
35 #define RYU_D2S_INTRINSICS_H
37 #if defined(HAS_64_BIT_INTRINSICS)
39 #include <intrin.h>
41 static inline uint64
42 umul128(const uint64 a, const uint64 b, uint64 *const productHi)
44 return _umul128(a, b, productHi);
47 static inline uint64
48 shiftright128(const uint64 lo, const uint64 hi, const uint32 dist)
51 * For the __shiftright128 intrinsic, the shift value is always modulo 64.
52 * In the current implementation of the double-precision version of Ryu,
53 * the shift value is always < 64. (In the case RYU_OPTIMIZE_SIZE == 0,
54 * the shift value is in the range [49, 58]. Otherwise in the range [2,
55 * 59].) Check this here in case a future change requires larger shift
56 * values. In this case this function needs to be adjusted.
58 Assert(dist < 64);
59 return __shiftright128(lo, hi, (unsigned char) dist);
62 #else /* defined(HAS_64_BIT_INTRINSICS) */
64 static inline uint64
65 umul128(const uint64 a, const uint64 b, uint64 *const productHi)
68 * The casts here help MSVC to avoid calls to the __allmul library
69 * function.
71 const uint32 aLo = (uint32) a;
72 const uint32 aHi = (uint32) (a >> 32);
73 const uint32 bLo = (uint32) b;
74 const uint32 bHi = (uint32) (b >> 32);
76 const uint64 b00 = (uint64) aLo * bLo;
77 const uint64 b01 = (uint64) aLo * bHi;
78 const uint64 b10 = (uint64) aHi * bLo;
79 const uint64 b11 = (uint64) aHi * bHi;
81 const uint32 b00Lo = (uint32) b00;
82 const uint32 b00Hi = (uint32) (b00 >> 32);
84 const uint64 mid1 = b10 + b00Hi;
85 const uint32 mid1Lo = (uint32) (mid1);
86 const uint32 mid1Hi = (uint32) (mid1 >> 32);
88 const uint64 mid2 = b01 + mid1Lo;
89 const uint32 mid2Lo = (uint32) (mid2);
90 const uint32 mid2Hi = (uint32) (mid2 >> 32);
92 const uint64 pHi = b11 + mid1Hi + mid2Hi;
93 const uint64 pLo = ((uint64) mid2Lo << 32) + b00Lo;
95 *productHi = pHi;
96 return pLo;
99 static inline uint64
100 shiftright128(const uint64 lo, const uint64 hi, const uint32 dist)
102 /* We don't need to handle the case dist >= 64 here (see above). */
103 Assert(dist < 64);
104 #if !defined(RYU_32_BIT_PLATFORM)
105 Assert(dist > 0);
106 return (hi << (64 - dist)) | (lo >> dist);
107 #else
108 /* Avoid a 64-bit shift by taking advantage of the range of shift values. */
109 Assert(dist >= 32);
110 return (hi << (64 - dist)) | ((uint32) (lo >> 32) >> (dist - 32));
111 #endif
114 #endif /* // defined(HAS_64_BIT_INTRINSICS) */
116 #ifdef RYU_32_BIT_PLATFORM
118 /* Returns the high 64 bits of the 128-bit product of a and b. */
119 static inline uint64
120 umulh(const uint64 a, const uint64 b)
123 * Reuse the umul128 implementation. Optimizers will likely eliminate the
124 * instructions used to compute the low part of the product.
126 uint64 hi;
128 umul128(a, b, &hi);
129 return hi;
/*----
 *  On 32-bit platforms, compilers typically generate calls to library
 *  functions for 64-bit divisions, even if the divisor is a constant.
 *
 *  E.g.:
 *  https://bugs.llvm.org/show_bug.cgi?id=37932
 *  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=17958
 *  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=37443
 *
 *  The functions here perform division-by-constant using multiplications
 *  in the same way as 64-bit compilers would do.
 *
 *  NB:
 *  The multipliers and shift values are the ones generated by clang x64
 *  for expressions like x/5, x/10, etc.
 *----
 */
150 static inline uint64
151 div5(const uint64 x)
153 return umulh(x, UINT64CONST(0xCCCCCCCCCCCCCCCD)) >> 2;
156 static inline uint64
157 div10(const uint64 x)
159 return umulh(x, UINT64CONST(0xCCCCCCCCCCCCCCCD)) >> 3;
162 static inline uint64
163 div100(const uint64 x)
165 return umulh(x >> 2, UINT64CONST(0x28F5C28F5C28F5C3)) >> 2;
168 static inline uint64
169 div1e8(const uint64 x)
171 return umulh(x, UINT64CONST(0xABCC77118461CEFD)) >> 26;
174 #else /* RYU_32_BIT_PLATFORM */
176 static inline uint64
177 div5(const uint64 x)
179 return x / 5;
182 static inline uint64
183 div10(const uint64 x)
185 return x / 10;
188 static inline uint64
189 div100(const uint64 x)
191 return x / 100;
194 static inline uint64
195 div1e8(const uint64 x)
197 return x / 100000000;
200 #endif /* RYU_32_BIT_PLATFORM */
202 #endif /* RYU_D2S_INTRINSICS_H */