//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

// Copyright 2018 Ulf Adams
// Copyright (c) Microsoft Corporation. All rights reserved.

// Boost Software License - Version 1.0 - August 17th, 2003

// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:

// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
// Avoid formatting to keep the changes with the original code minimal.
// clang-format off

#include <__assert>
#include <__config>
#include <charconv>
#include <cstring>

#include "include/ryu/common.h"
#include "include/ryu/d2fixed.h"
#include "include/ryu/d2fixed_full_table.h"
#include "include/ryu/d2s.h"
#include "include/ryu/d2s_intrinsics.h"
#include "include/ryu/digit_table.h"
_LIBCPP_BEGIN_NAMESPACE_STD

inline constexpr int __POW10_ADDITIONAL_BITS = 120;
#ifdef _LIBCPP_INTRINSIC128
// Returns the low 64 bits of the high 128 bits of the 256-bit product of a and b.
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint64_t __umul256_hi128_lo64(
  const uint64_t __aHi, const uint64_t __aLo, const uint64_t __bHi, const uint64_t __bLo) {
  uint64_t __b00Hi;
  const uint64_t __b00Lo = __ryu_umul128(__aLo, __bLo, &__b00Hi);
  uint64_t __b01Hi;
  const uint64_t __b01Lo = __ryu_umul128(__aLo, __bHi, &__b01Hi);
  uint64_t __b10Hi;
  const uint64_t __b10Lo = __ryu_umul128(__aHi, __bLo, &__b10Hi);
  uint64_t __b11Hi;
  const uint64_t __b11Lo = __ryu_umul128(__aHi, __bHi, &__b11Hi);
  (void) __b00Lo; // unused
  (void) __b11Hi; // unused
  const uint64_t __temp1Lo = __b10Lo + __b00Hi;
  const uint64_t __temp1Hi = __b10Hi + (__temp1Lo < __b10Lo);
  const uint64_t __temp2Lo = __b01Lo + __temp1Lo;
  const uint64_t __temp2Hi = __b01Hi + (__temp2Lo < __b01Lo);
  return __b11Lo + __temp1Hi + __temp2Hi;
}
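
// Illustrative sketch of the computation above (not part of the upstream Ryu comments):
// with __a = __aHi * 2^64 + __aLo and __b = __bHi * 2^64 + __bLo, the 256-bit product is
//   __a * __b = __b11 * 2^128 + (__b01 + __b10) * 2^64 + __b00
// where the __bXY are the four 128-bit partial products. Bits [128, 192) of the result
// are __b11Lo plus the carries out of the 64-bit additions that form bits [64, 128);
// comparisons such as (__temp1Lo < __b10Lo) recover exactly those carries.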
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __uint128_mod1e9(const uint64_t __vHi, const uint64_t __vLo) {
  // After multiplying, we're going to shift right by 29, then truncate to uint32_t.
  // This means that we need only 29 + 32 = 61 bits, so we can truncate to uint64_t before shifting.
  const uint64_t __multiplied = __umul256_hi128_lo64(__vHi, __vLo, 0x89705F4136B4A597u, 0x31680A88F8953031u);

  // For uint32_t truncation, see the __mod1e9() comment in d2s_intrinsics.h.
  const uint32_t __shifted = static_cast<uint32_t>(__multiplied >> 29);

  return static_cast<uint32_t>(__vLo) - 1000000000 * __shifted;
}
#endif // ^^^ intrinsics available ^^^
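
// Note (illustrative, not from the upstream sources): the 128-bit constant
// 0x89705F4136B4A597'31680A88F8953031 used above appears to be approximately 2^157 / 10^9,
// so taking the high 128 bits of the 256-bit product (a division by 2^128) and shifting by
// 29 more bits yields floor(__v / 10^9) for the inputs that reach this helper; the final
// line then forms __v % 10^9 in 32-bit arithmetic.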
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __mulShift_mod1e9(const uint64_t __m, const uint64_t* const __mul, const int32_t __j) {
  uint64_t __high0;                                               // 64
  const uint64_t __low0 = __ryu_umul128(__m, __mul[0], &__high0); // 0
  uint64_t __high1;                                               // 128
  const uint64_t __low1 = __ryu_umul128(__m, __mul[1], &__high1); // 64
  uint64_t __high2;                                               // 192
  const uint64_t __low2 = __ryu_umul128(__m, __mul[2], &__high2); // 128
  const uint64_t __s0low = __low0;                  // 0
  (void) __s0low; // unused
  const uint64_t __s0high = __low1 + __high0;       // 64
  const uint32_t __c1 = __s0high < __low1;
  const uint64_t __s1low = __low2 + __high1 + __c1; // 128
  const uint32_t __c2 = __s1low < __low2; // __high1 + __c1 can't overflow, so compare against __low2
  const uint64_t __s1high = __high2 + __c2;         // 192
  _LIBCPP_ASSERT_UNCATEGORIZED(__j >= 128, "");
  _LIBCPP_ASSERT_UNCATEGORIZED(__j <= 180, "");
#ifdef _LIBCPP_INTRINSIC128
  const uint32_t __dist = static_cast<uint32_t>(__j - 128); // __dist: [0, 52]
  const uint64_t __shiftedhigh = __s1high >> __dist;
  const uint64_t __shiftedlow = __ryu_shiftright128(__s1low, __s1high, __dist);
  return __uint128_mod1e9(__shiftedhigh, __shiftedlow);
#else // ^^^ intrinsics available ^^^ / vvv intrinsics unavailable vvv
  if (__j < 160) { // __j: [128, 160)
    const uint64_t __r0 = __mod1e9(__s1high);
    const uint64_t __r1 = __mod1e9((__r0 << 32) | (__s1low >> 32));
    const uint64_t __r2 = ((__r1 << 32) | (__s1low & 0xffffffff));
    return __mod1e9(__r2 >> (__j - 128));
  } else { // __j: [160, 192)
    const uint64_t __r0 = __mod1e9(__s1high);
    const uint64_t __r1 = ((__r0 << 32) | (__s1low >> 32));
    return __mod1e9(__r1 >> (__j - 160));
  }
#endif // ^^^ intrinsics unavailable ^^^
}
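
// Illustrative summary (an assumption based on the table layout, not upstream commentary):
// __mul points at three 64-bit limbs of a precomputed 192-bit power-of-10 segment from
// d2fixed_full_table.h. The 64x192-bit product is accumulated into the limbs ending at
// __s1high:__s1low; shifting right by __j (in [128, 180]) and reducing mod 10^9 extracts
// one 9-decimal-digit block of the fixed-point expansion of __m times that power of 10.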
_LIBCPP_HIDE_FROM_ABI inline void __append_n_digits(const uint32_t __olength, uint32_t __digits, char* const __result) {
  uint32_t __i = 0;
  while (__digits >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
    const uint32_t __c = __digits % 10000;
#endif
    __digits /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    std::memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c0, 2);
    std::memcpy(__result + __olength - __i - 4, __DIGIT_TABLE + __c1, 2);
    __i += 4;
  }
  if (__digits >= 100) {
    const uint32_t __c = (__digits % 100) << 1;
    __digits /= 100;
    std::memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c, 2);
    __i += 2;
  }
  if (__digits >= 10) {
    const uint32_t __c = __digits << 1;
    std::memcpy(__result + __olength - __i - 2, __DIGIT_TABLE + __c, 2);
  } else {
    __result[0] = static_cast<char>('0' + __digits);
  }
}
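
// Worked example (illustrative): __append_n_digits(6, 123456, __result) writes "56" at
// offset 4 and "34" at offset 2 in the first loop iteration, then "12" at offset 0 via
// the __digits >= 10 branch, producing "123456" with no terminating null.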
_LIBCPP_HIDE_FROM_ABI inline void __append_d_digits(const uint32_t __olength, uint32_t __digits, char* const __result) {
  uint32_t __i = 0;
  while (__digits >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
    const uint32_t __c = __digits % 10000;
#endif
    __digits /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    std::memcpy(__result + __olength + 1 - __i - 2, __DIGIT_TABLE + __c0, 2);
    std::memcpy(__result + __olength + 1 - __i - 4, __DIGIT_TABLE + __c1, 2);
    __i += 4;
  }
  if (__digits >= 100) {
    const uint32_t __c = (__digits % 100) << 1;
    __digits /= 100;
    std::memcpy(__result + __olength + 1 - __i - 2, __DIGIT_TABLE + __c, 2);
    __i += 2;
  }
  if (__digits >= 10) {
    const uint32_t __c = __digits << 1;
    __result[2] = __DIGIT_TABLE[__c + 1];
    __result[1] = '.';
    __result[0] = __DIGIT_TABLE[__c];
  } else {
    __result[1] = '.';
    __result[0] = static_cast<char>('0' + __digits);
  }
}
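
// Worked example (illustrative): __append_d_digits(4, 1729, __result) produces "1.729",
// i.e. the __olength digits of the value with a '.' inserted after the first one, for a
// total of __olength + 1 characters.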
_LIBCPP_HIDE_FROM_ABI inline void __append_c_digits(const uint32_t __count, uint32_t __digits, char* const __result) {
  uint32_t __i = 0;
  for (; __i < __count - 1; __i += 2) {
    const uint32_t __c = (__digits % 100) << 1;
    __digits /= 100;
    std::memcpy(__result + __count - __i - 2, __DIGIT_TABLE + __c, 2);
  }
  if (__i < __count) {
    const char __c = static_cast<char>('0' + (__digits % 10));
    __result[__count - __i - 1] = __c;
  }
}
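
// Worked example (illustrative): __append_c_digits(5, 42, __result) produces "00042";
// the value is written right-aligned in a field of exactly __count characters, with
// "00" pairs from __DIGIT_TABLE filling the unused leading positions.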
_LIBCPP_HIDE_FROM_ABI inline void __append_nine_digits(uint32_t __digits, char* const __result) {
  if (__digits == 0) {
    std::memset(__result, '0', 9);
    return;
  }

  for (uint32_t __i = 0; __i < 5; __i += 4) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __digits - 10000 * (__digits / 10000);
#else
    const uint32_t __c = __digits % 10000;
#endif
    __digits /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    std::memcpy(__result + 7 - __i, __DIGIT_TABLE + __c0, 2);
    std::memcpy(__result + 5 - __i, __DIGIT_TABLE + __c1, 2);
  }
  __result[0] = static_cast<char>('0' + __digits);
}
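
// Worked example (illustrative): __append_nine_digits(5, __result) produces "000000005".
// The loop body runs exactly twice (__i == 0 and __i == 4), emitting the low eight digits
// as four zero-padded pairs, and the final statement writes the most significant digit.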
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __indexForExponent(const uint32_t __e) {
  return (__e + 15) / 16;
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __pow10BitsForIndex(const uint32_t __idx) {
  return 16 * __idx + __POW10_ADDITIONAL_BITS;
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __lengthForIndex(const uint32_t __idx) {
  // +1 for ceil, +16 for mantissa, +8 to round up when dividing by 9
  return (__log10Pow2(16 * static_cast<int32_t>(__idx)) + 1 + 16 + 8) / 9;
}
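
// Worked example (illustrative): for __idx == 1, __log10Pow2(16) == 4, so
// __lengthForIndex(1) == (4 + 1 + 16 + 8) / 9 == 3, i.e. three 9-digit blocks suffice for
// the integer-part loop when the binary exponent falls in that 16-exponent bucket.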
[[nodiscard]] to_chars_result __d2fixed_buffered_n(char* _First, char* const _Last, const double __d,
  const uint32_t __precision) {
  char* const _Original_first = _First;

  const uint64_t __bits = __double_to_bits(__d);

  // Case distinction; exit early for the easy cases.
  if (__bits == 0) {
    const int32_t _Total_zero_length = 1 // leading zero
      + static_cast<int32_t>(__precision != 0) // possible decimal point
      + static_cast<int32_t>(__precision); // zeroes after decimal point

    if (_Last - _First < _Total_zero_length) {
      return { _Last, errc::value_too_large };
    }

    *_First++ = '0';
    if (__precision > 0) {
      *_First++ = '.';
      std::memset(_First, '0', __precision);
      _First += __precision;
    }
    return { _First, errc{} };
  }

  // Decode __bits into mantissa and exponent.
  const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
  const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);

  int32_t __e2;
  uint64_t __m2;
  if (__ieeeExponent == 0) {
    __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = __ieeeMantissa;
  } else {
    __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
  }

  bool __nonzero = false;
  if (__e2 >= -52) {
    const uint32_t __idx = __e2 < 0 ? 0 : __indexForExponent(static_cast<uint32_t>(__e2));
    const uint32_t __p10bits = __pow10BitsForIndex(__idx);
    const int32_t __len = static_cast<int32_t>(__lengthForIndex(__idx));
    for (int32_t __i = __len - 1; __i >= 0; --__i) {
      const uint32_t __j = __p10bits - __e2;
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      const uint32_t __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT[__POW10_OFFSET[__idx] + __i],
        static_cast<int32_t>(__j + 8));
      if (__nonzero) {
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
      } else if (__digits != 0) {
        const uint32_t __olength = __decimalLength9(__digits);
        if (_Last - _First < static_cast<ptrdiff_t>(__olength)) {
          return { _Last, errc::value_too_large };
        }
        __append_n_digits(__olength, __digits, _First);
        _First += __olength;
        __nonzero = true;
      }
    }
  }
  if (!__nonzero) {
    if (_First == _Last) {
      return { _Last, errc::value_too_large };
    }
    *_First++ = '0';
  }
  if (__precision > 0) {
    if (_First == _Last) {
      return { _Last, errc::value_too_large };
    }
    *_First++ = '.';
  }
  if (__e2 < 0) {
    const int32_t __idx = -__e2 / 16;
    const uint32_t __blocks = __precision / 9 + 1;
    // 0 = don't round up; 1 = round up unconditionally; 2 = round up if odd.
    int __roundUp = 0;
    uint32_t __i = 0;
    if (__blocks <= __MIN_BLOCK_2[__idx]) {
      __i = __blocks;
      if (_Last - _First < static_cast<ptrdiff_t>(__precision)) {
        return { _Last, errc::value_too_large };
      }
      std::memset(_First, '0', __precision);
      _First += __precision;
    } else if (__i < __MIN_BLOCK_2[__idx]) {
      __i = __MIN_BLOCK_2[__idx];
      if (_Last - _First < static_cast<ptrdiff_t>(9 * __i)) {
        return { _Last, errc::value_too_large };
      }
      std::memset(_First, '0', 9 * __i);
      _First += 9 * __i;
    }
    for (; __i < __blocks; ++__i) {
      const int32_t __j = __ADDITIONAL_BITS_2 + (-__e2 - 16 * __idx);
      const uint32_t __p = __POW10_OFFSET_2[__idx] + __i - __MIN_BLOCK_2[__idx];
      if (__p >= __POW10_OFFSET_2[__idx + 1]) {
        // If the remaining digits are all 0, then we might as well use memset.
        // No rounding required in this case.
        const uint32_t __fill = __precision - 9 * __i;
        if (_Last - _First < static_cast<ptrdiff_t>(__fill)) {
          return { _Last, errc::value_too_large };
        }
        std::memset(_First, '0', __fill);
        _First += __fill;
        break;
      }
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      uint32_t __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT_2[__p], __j + 8);
      if (__i < __blocks - 1) {
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
      } else {
        const uint32_t __maximum = __precision - 9 * __i;
        uint32_t __lastDigit = 0;
        for (uint32_t __k = 0; __k < 9 - __maximum; ++__k) {
          __lastDigit = __digits % 10;
          __digits /= 10;
        }
        if (__lastDigit != 5) {
          __roundUp = __lastDigit > 5;
        } else {
          // Is m * 10^(additionalDigits + 1) / 2^(-__e2) integer?
          const int32_t __requiredTwos = -__e2 - static_cast<int32_t>(__precision) - 1;
          const bool __trailingZeros = __requiredTwos <= 0
            || (__requiredTwos < 60 && __multipleOfPowerOf2(__m2, static_cast<uint32_t>(__requiredTwos)));
          __roundUp = __trailingZeros ? 2 : 1;
        }
        if (__maximum > 0) {
          if (_Last - _First < static_cast<ptrdiff_t>(__maximum)) {
            return { _Last, errc::value_too_large };
          }
          __append_c_digits(__maximum, __digits, _First);
          _First += __maximum;
        }
        break;
      }
    }
    if (__roundUp != 0) {
      char* _Round = _First;
      char* _Dot = _Last;
      while (true) {
        if (_Round == _Original_first) {
          _Round[0] = '1';
          if (_Dot != _Last) {
            _Dot[0] = '0';
            _Dot[1] = '.';
          }
          if (_First == _Last) {
            return { _Last, errc::value_too_large };
          }
          *_First++ = '0';
          break;
        }
        --_Round;
        const char __c = _Round[0];
        if (__c == '.') {
          _Dot = _Round;
        } else if (__c == '9') {
          _Round[0] = '0';
          __roundUp = 1;
        } else {
          if (__roundUp == 1 || __c % 2 != 0) {
            _Round[0] = __c + 1;
          }
          break;
        }
      }
    }
  } else {
    if (_Last - _First < static_cast<ptrdiff_t>(__precision)) {
      return { _Last, errc::value_too_large };
    }
    std::memset(_First, '0', __precision);
    _First += __precision;
  }
  return { _First, errc{} };
}
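
// Illustrative usage sketch (an assumption; this mirrors how the charconv layer dispatches
// chars_format::fixed with an explicit precision):
//   char __buf[64];
//   const to_chars_result __r = __d2fixed_buffered_n(__buf, __buf + sizeof(__buf), 3.25, 2);
//   // On success __r.ec == errc{} and [__buf, __r.ptr) holds "3.25"; with an undersized
//   // buffer the function returns { _Last, errc::value_too_large } instead.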
[[nodiscard]] to_chars_result __d2exp_buffered_n(char* _First, char* const _Last, const double __d,
  uint32_t __precision) {
  char* const _Original_first = _First;

  const uint64_t __bits = __double_to_bits(__d);

  // Case distinction; exit early for the easy cases.
  if (__bits == 0) {
    const int32_t _Total_zero_length = 1 // leading zero
      + static_cast<int32_t>(__precision != 0) // possible decimal point
      + static_cast<int32_t>(__precision) // zeroes after decimal point
      + 4; // "e+00"
    if (_Last - _First < _Total_zero_length) {
      return { _Last, errc::value_too_large };
    }
    *_First++ = '0';
    if (__precision > 0) {
      *_First++ = '.';
      std::memset(_First, '0', __precision);
      _First += __precision;
    }
    std::memcpy(_First, "e+00", 4);
    _First += 4;
    return { _First, errc{} };
  }

  // Decode __bits into mantissa and exponent.
  const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
  const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);

  int32_t __e2;
  uint64_t __m2;
  if (__ieeeExponent == 0) {
    __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = __ieeeMantissa;
  } else {
    __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;
    __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
  }

  const bool __printDecimalPoint = __precision > 0;
  ++__precision;
  uint32_t __digits = 0;
  uint32_t __printedDigits = 0;
  uint32_t __availableDigits = 0;
  int32_t __exp = 0;
  if (__e2 >= -52) {
    const uint32_t __idx = __e2 < 0 ? 0 : __indexForExponent(static_cast<uint32_t>(__e2));
    const uint32_t __p10bits = __pow10BitsForIndex(__idx);
    const int32_t __len = static_cast<int32_t>(__lengthForIndex(__idx));
    for (int32_t __i = __len - 1; __i >= 0; --__i) {
      const uint32_t __j = __p10bits - __e2;
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      __digits = __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT[__POW10_OFFSET[__idx] + __i],
        static_cast<int32_t>(__j + 8));
      if (__printedDigits != 0) {
        if (__printedDigits + 9 > __precision) {
          __availableDigits = 9;
          break;
        }
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
        __printedDigits += 9;
      } else if (__digits != 0) {
        __availableDigits = __decimalLength9(__digits);
        __exp = __i * 9 + static_cast<int32_t>(__availableDigits) - 1;
        if (__availableDigits > __precision) {
          break;
        }
        if (__printDecimalPoint) {
          if (_Last - _First < static_cast<ptrdiff_t>(__availableDigits + 1)) {
            return { _Last, errc::value_too_large };
          }
          __append_d_digits(__availableDigits, __digits, _First);
          _First += __availableDigits + 1; // +1 for decimal point
        } else {
          if (_First == _Last) {
            return { _Last, errc::value_too_large };
          }
          *_First++ = static_cast<char>('0' + __digits);
        }
        __printedDigits = __availableDigits;
        __availableDigits = 0;
      }
    }
  }

  if (__e2 < 0 && __availableDigits == 0) {
    const int32_t __idx = -__e2 / 16;
    for (int32_t __i = __MIN_BLOCK_2[__idx]; __i < 200; ++__i) {
      const int32_t __j = __ADDITIONAL_BITS_2 + (-__e2 - 16 * __idx);
      const uint32_t __p = __POW10_OFFSET_2[__idx] + static_cast<uint32_t>(__i) - __MIN_BLOCK_2[__idx];
      // Temporary: __j is usually around 128, and by shifting a bit, we push it to 128 or above, which is
      // a slightly faster code path in __mulShift_mod1e9. Instead, we can just increase the multipliers.
      __digits = (__p >= __POW10_OFFSET_2[__idx + 1]) ? 0 : __mulShift_mod1e9(__m2 << 8, __POW10_SPLIT_2[__p], __j + 8);
      if (__printedDigits != 0) {
        if (__printedDigits + 9 > __precision) {
          __availableDigits = 9;
          break;
        }
        if (_Last - _First < 9) {
          return { _Last, errc::value_too_large };
        }
        __append_nine_digits(__digits, _First);
        _First += 9;
        __printedDigits += 9;
      } else if (__digits != 0) {
        __availableDigits = __decimalLength9(__digits);
        __exp = -(__i + 1) * 9 + static_cast<int32_t>(__availableDigits) - 1;
        if (__availableDigits > __precision) {
          break;
        }
        if (__printDecimalPoint) {
          if (_Last - _First < static_cast<ptrdiff_t>(__availableDigits + 1)) {
            return { _Last, errc::value_too_large };
          }
          __append_d_digits(__availableDigits, __digits, _First);
          _First += __availableDigits + 1; // +1 for decimal point
        } else {
          if (_First == _Last) {
            return { _Last, errc::value_too_large };
          }
          *_First++ = static_cast<char>('0' + __digits);
        }
        __printedDigits = __availableDigits;
        __availableDigits = 0;
      }
    }
  }

  const uint32_t __maximum = __precision - __printedDigits;
  if (__availableDigits == 0) {
    __digits = 0;
  }
  uint32_t __lastDigit = 0;
  if (__availableDigits > __maximum) {
    for (uint32_t __k = 0; __k < __availableDigits - __maximum; ++__k) {
      __lastDigit = __digits % 10;
      __digits /= 10;
    }
  }
  // 0 = don't round up; 1 = round up unconditionally; 2 = round up if odd.
  int __roundUp = 0;
  if (__lastDigit != 5) {
    __roundUp = __lastDigit > 5;
  } else {
    // Is m * 2^__e2 * 10^(__precision + 1 - __exp) integer?
    // __precision was already increased by 1, so we don't need to write + 1 here.
    const int32_t __rexp = static_cast<int32_t>(__precision) - __exp;
    const int32_t __requiredTwos = -__e2 - __rexp;
    bool __trailingZeros = __requiredTwos <= 0
      || (__requiredTwos < 60 && __multipleOfPowerOf2(__m2, static_cast<uint32_t>(__requiredTwos)));
    if (__rexp < 0) {
      const int32_t __requiredFives = -__rexp;
      __trailingZeros = __trailingZeros && __multipleOfPowerOf5(__m2, static_cast<uint32_t>(__requiredFives));
    }
    __roundUp = __trailingZeros ? 2 : 1;
  }
  if (__printedDigits != 0) {
    if (_Last - _First < static_cast<ptrdiff_t>(__maximum)) {
      return { _Last, errc::value_too_large };
    }
    if (__digits == 0) {
      std::memset(_First, '0', __maximum);
    } else {
      __append_c_digits(__maximum, __digits, _First);
    }
    _First += __maximum;
  } else {
    if (__printDecimalPoint) {
      if (_Last - _First < static_cast<ptrdiff_t>(__maximum + 1)) {
        return { _Last, errc::value_too_large };
      }
      __append_d_digits(__maximum, __digits, _First);
      _First += __maximum + 1; // +1 for decimal point
    } else {
      if (_First == _Last) {
        return { _Last, errc::value_too_large };
      }
      *_First++ = static_cast<char>('0' + __digits);
    }
  }
  if (__roundUp != 0) {
    char* _Round = _First;
    while (true) {
      if (_Round == _Original_first) {
        _Round[0] = '1';
        ++__exp;
        break;
      }
      --_Round;
      const char __c = _Round[0];
      if (__c == '.') {
        // Keep going.
      } else if (__c == '9') {
        _Round[0] = '0';
        __roundUp = 1;
      } else {
        if (__roundUp == 1 || __c % 2 != 0) {
          _Round[0] = __c + 1;
        }
        break;
      }
    }
  }
  char _Sign_character;

  if (__exp < 0) {
    _Sign_character = '-';
    __exp = -__exp;
  } else {
    _Sign_character = '+';
  }

  const int _Exponent_part_length = __exp >= 100
    ? 5 // "e+NNN"
    : 4; // "e+NN"

  if (_Last - _First < _Exponent_part_length) {
    return { _Last, errc::value_too_large };
  }

  *_First++ = 'e';
  *_First++ = _Sign_character;

  if (__exp >= 100) {
    const int32_t __c = __exp % 10;
    std::memcpy(_First, __DIGIT_TABLE + 2 * (__exp / 10), 2);
    _First[2] = static_cast<char>('0' + __c);
    _First += 3;
  } else {
    std::memcpy(_First, __DIGIT_TABLE + 2 * __exp, 2);
    _First += 2;
  }

  return { _First, errc{} };
}
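
// Illustrative usage sketch (an assumption; this mirrors how the charconv layer dispatches
// chars_format::scientific with an explicit precision):
//   char __buf[64];
//   const to_chars_result __r = __d2exp_buffered_n(__buf, __buf + sizeof(__buf), 3.25, 2);
//   // On success [__buf, __r.ptr) holds "3.25e+00"; exponents of 100 or more widen the
//   // suffix to "e+NNN", as handled by _Exponent_part_length above.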
_LIBCPP_END_NAMESPACE_STD