1 /* ieee754-df.S double-precision floating point support for ARM
3 Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Nicolas Pitre (nico@cam.org)
6 This file is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 2, or (at your option) any
11 In addition to the permissions in the GNU General Public License, the
12 Free Software Foundation gives you unlimited permission to link the
13 compiled version of this file into combinations with other programs,
14 and to distribute those combinations without any restriction coming
15 from the use of this file. (The General Public License restrictions
16 do apply in other respects; for example, they cover modification of
17 the file, and distribution when not linked into a combined
20 This file is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
25 You should have received a copy of the GNU General Public License
26 along with this program; see the file COPYING. If not, write to
27 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
28 Boston, MA 02110-1301, USA. */
33 * The goal of this code is to be as fast as possible. This is
34 * not meant to be easy to understand for the casual reader.
35 * For slightly simpler code please see the single precision version
38 * Only the default rounding mode is intended for best performances.
39 * Exceptions aren't supported yet, but that can be added quite easily
40 * if necessary without impacting performances.
44 @ For FPA, float words are always big-endian.
45 @ For VFP, floats words follow the memory system mode.
46 #if defined(__VFP_FP__) && !defined(__ARMEB__)
@ negdf2 / __aeabi_dneg: double-precision negation.
@ IEEE 754 uses sign/magnitude, so negation is just flipping the sign
@ bit in the high word; exponent and mantissa are untouched.
@ NOTE(review): excerpt is non-contiguous (embedded line numbers jump);
@ the ARM_FUNC_START line and surrounding code are not shown here.
62 ARM_FUNC_ALIAS aeabi_dneg negdf2
65 eor xh, xh, #0x80000000
@ adddf3 / subdf3 / aeabi_drsub / aeabi_dsub / aeabi_dadd:
@ double-precision add, subtract and reverse-subtract.
@ Subtraction is implemented as addition after flipping the relevant
@ sign bit (x - y == x + (-y); rsub flips the first argument instead).
@ Arguments arrive in xh-xl and yh-yl (register aliases defined
@ elsewhere in the file); the result is returned in xh-xl.
@ NOTE(review): this excerpt is non-contiguous — embedded original line
@ numbers jump, so instructions in the gaps are not visible here.
75 ARM_FUNC_START aeabi_drsub
77 eor xh, xh, #0x80000000 @ flip sign bit of first arg
81 ARM_FUNC_ALIAS aeabi_dsub subdf3
83 eor yh, yh, #0x80000000 @ flip sign bit of second arg
84 #if defined(__INTERWORKING_STUBS__)
85 b 1f @ Skip Thumb-code prologue
89 ARM_FUNC_ALIAS aeabi_dadd adddf3
91 1: stmfd sp!, {r4, r5, lr}
93 @ Look for zeroes, equal values, INF, or NAN.
@ mvns of (word asr #21) yields EQ when the 11-bit exponent field is
@ all-ones, i.e. the value is INF or NAN.
100 mvnnes ip, r4, asr #21
101 mvnnes ip, r5, asr #21
104 @ Compute exponent difference. Make largest exponent in r4,
105 @ corresponding arg in xh-xl, and positive exponent difference in r5.
107 rsbs r5, r4, r5, lsr #21
118 @ If exponent difference is too large, return largest argument
119 @ already in xh-xl. We need up to 54 bits to handle proper rounding
124 @ Convert mantissa to signed integer.
128 orr xh, ip, xh, lsr #12
135 orr yh, ip, yh, lsr #12
140 @ If exponent == difference, one or both args were denormalized.
141 @ Since this is not a common case, rescale them off line.
146 @ Compensate for the exponent overlapping the mantissa MSB added later
149 @ Shift yh-yl right per r5, add to xh-xl, keep leftover bits into ip.
153 adds xl, xl, yl, lsr r5
155 adds xl, xl, yh, lsl lr
156 adcs xh, xh, yh, asr r5
162 orrcs ip, ip, #2 @ 2 not 1, to allow lsr #1 later
163 adds xl, xl, yh, asr r5
164 adcs xh, xh, yh, asr #31
166 @ We now have a result in xh-xl-ip.
167 @ Keep absolute value in xh-xl-ip, sign in r5 (the n bit was set above)
168 and r5, xh, #0x80000000
174 @ Determine how to normalize the result.
181 @ Result needs to be shifted right.
187 @ Make sure we did not bust our exponent.
192 @ Our result is now properly aligned into xh-xl, remaining bits in ip.
193 @ Round with MSB of ip. If halfway between two numbers, round towards
195 @ Pack final result together.
@ Round-to-nearest-even: when exactly halfway (only the MSB of ip set),
@ the low result bit is tested so the carry into xl/xh ties to even.
198 moveqs ip, xl, lsr #1
200 adc xh, xh, r4, lsl #20
204 @ Result must be shifted left and exponent adjusted.
213 @ No rounding necessary since ip will always be 0 at this point.
@ What follows (presumably, given the gaps) is a normalization shift
@ count computation — TODO confirm against the full source.
225 movhs r2, r2, lsr #16
235 sublo r3, r3, r2, lsr #1
236 sub r3, r3, r2, lsr #3
249 @ determine how to shift the value.
255 @ shift value left 21 to 31 bits, or actually right 11 to 1 bits
256 @ since a register switch happened above.
263 @ actually shift value left 1 to 20 bits, which might also represent
264 @ 32 to 52 bits if counting the register switch that happened earlier.
268 orrle xh, xh, xl, lsr ip
271 @ adjust exponent accordingly.
273 addge xh, xh, r4, lsl #20
277 @ Exponent too small, denormalize result.
278 @ Find out proper shift value.
285 @ shift result right of 1 to 20 bits, sign is in r5.
289 orr xl, xl, xh, lsl r2
290 orr xh, r5, xh, lsr r4
293 @ shift result right of 21 to 31 bits, or left 11 to 1 bits after
294 @ a register switch from xh to xl.
298 orr xl, xl, xh, lsl r4
302 @ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
304 2: mov xl, xh, lsr r4
308 @ Adjust exponents for denormalized arguments.
309 @ Note that r4 must not remain equal to 0.
@ Toggling bit 20 restores/removes the implicit mantissa MSB for
@ denormals.
312 eor yh, yh, #0x00100000
313 eoreq xh, xh, #0x00100000
321 mvnnes ip, r5, asr #21
328 @ Result is x + 0.0 = x or 0.0 + y = y.
336 @ Result is x - x = 0.
341 @ Result is x + x = 2x.
346 orrcs xh, xh, #0x80000000
348 2: adds r4, r4, #(2 << 21)
349 addcc xh, xh, #(1 << 20)
351 and r5, xh, #0x80000000
353 @ Overflow: return INF.
@ 0x7ff00000 in xh (built in two immediates, since ARM immediates are
@ 8 bits rotated) plus the preserved sign = +/-INF.
355 orr xh, r5, #0x7f000000
356 orr xh, xh, #0x00f00000
360 @ At least one of x or y is INF/NAN.
361 @ if xh-xl != INF/NAN: return yh-yl (which is INF/NAN)
362 @ if yh-yl != INF/NAN: return xh-xl (which is INF/NAN)
363 @ if either is NAN: return NAN
364 @ if opposite sign: return NAN
365 @ otherwise return xh-xl (which is INF or -INF)
370 mvneqs ip, r5, asr #21
@ Non-zero mantissa bits with an all-ones exponent distinguish NAN
@ from INF.
373 orrs r4, xl, xh, lsl #12
374 orreqs r5, yl, yh, lsl #12
376 orrne xh, xh, #0x00080000 @ quiet NAN
@ floatunsidf / __aeabi_ui2d: convert unsigned 32-bit int to double.
@ Sets up sign = 0 and a biased initial exponent, then (in code not
@ shown in this excerpt) presumably falls into shared normalization
@ code — TODO confirm against the full source.
384 ARM_FUNC_START floatunsidf
385 ARM_FUNC_ALIAS aeabi_ui2d floatunsidf
390 stmfd sp!, {r4, r5, lr}
@ 0x400 + 50 = 1074 = 1023 (bias) + 51; exact accounting depends on
@ the normalization code that follows (not visible here).
391 mov r4, #0x400 @ initial exponent
392 add r4, r4, #(52-1 - 1)
393 mov r5, #0 @ sign bit is 0
@ floatsidf / __aeabi_i2d: convert signed 32-bit int to double.
@ Same setup as floatunsidf, except the sign is extracted into r5 and
@ the value made positive first.
403 ARM_FUNC_START floatsidf
404 ARM_FUNC_ALIAS aeabi_i2d floatsidf
409 stmfd sp!, {r4, r5, lr}
410 mov r4, #0x400 @ initial exponent
411 add r4, r4, #(52-1 - 1)
412 ands r5, r0, #0x80000000 @ sign bit in r5
413 rsbmi r0, r0, #0 @ absolute value
@ extendsfdf2 / __aeabi_f2d: widen single-precision float to double.
@ Fast path: rebias the 8-bit exponent to 11 bits with a single eor
@ (difference between biases is 0x380, applied as 0x38000000 after the
@ asr #3 stretch). Zero, INF, NAN and denormals take slower paths.
423 ARM_FUNC_START extendsfdf2
424 ARM_FUNC_ALIAS aeabi_f2d extendsfdf2
426 movs r2, r0, lsl #1 @ toss sign bit
427 mov xh, r2, asr #3 @ stretch exponent
428 mov xh, xh, rrx @ retrieve sign bit
429 mov xl, r2, lsl #28 @ retrieve remaining bits
430 andnes r3, r2, #0xff000000 @ isolate exponent
431 teqne r3, #0xff000000 @ if not 0, check if INF or NAN
432 eorne xh, xh, #0x38000000 @ fixup exponent otherwise.
433 RETc(ne) @ and return it.
435 teq r2, #0 @ if actually 0
436 teqne r3, #0xff000000 @ or INF or NAN
437 RETc(eq) @ we are done already.
439 @ value was denormalized. We can normalize it now.
440 stmfd sp!, {r4, r5, lr}
441 mov r4, #0x380 @ setup corresponding exponent
442 and r5, xh, #0x80000000 @ move sign bit in r5
443 bic xh, xh, #0x80000000
@ floatundidf / __aeabi_ul2d: convert unsigned 64-bit int to double.
@ The FPA (hard legacy float) build saves an extra register (ip) so the
@ tail can also copy the result into f0 for backwards compatibility.
449 ARM_FUNC_START floatundidf
450 ARM_FUNC_ALIAS aeabi_ul2d floatundidf
453 #if !defined (__VFP_FP__) && !defined(__SOFTFP__)
458 #if !defined (__VFP_FP__) && !defined(__SOFTFP__)
459 @ For hard FPA code we want to return via the tail below so that
460 @ we can return the result in f0 as well as in r0/r1 for backwards
463 stmfd sp!, {r4, r5, ip, lr}
465 stmfd sp!, {r4, r5, lr}
@ floatdidf / __aeabi_l2d: convert signed 64-bit int to double.
@ Extracts the sign, then shares the normalization/packing tail with
@ the other int-to-double converters (most of it not visible in this
@ non-contiguous excerpt).
471 ARM_FUNC_START floatdidf
472 ARM_FUNC_ALIAS aeabi_l2d floatdidf
475 #if !defined (__VFP_FP__) && !defined(__SOFTFP__)
480 #if !defined (__VFP_FP__) && !defined(__SOFTFP__)
481 @ For hard FPA code we want to return via the tail below so that
482 @ we can return the result in f0 as well as in r0/r1 for backwards
485 stmfd sp!, {r4, r5, ip, lr}
487 stmfd sp!, {r4, r5, lr}
490 ands r5, ah, #0x80000000 @ sign bit in r5
495 mov r4, #0x400 @ initial exponent
496 add r4, r4, #(52-1 - 1)
498 @ FPA little-endian: must swap the word order.
508 @ The value is too big. Scale it down a bit...
514 add r2, r2, ip, lsr #3
519 orr xl, xl, xh, lsl r3
524 #if !defined (__VFP_FP__) && !defined(__SOFTFP__)
526 @ Legacy code expects the result to be returned in f0. Copy it
540 #endif /* L_addsubdf3 */
@ muldf3 / __aeabi_dmul: double-precision multiplication.
@ Strategy: strip and add exponents, multiply the two 53-bit mantissas
@ (via a 16x16 schoolbook expansion when umull is unavailable),
@ normalize, round to nearest even, and handle denormal/INF/NAN/zero
@ operands on out-of-line paths.
@ NOTE(review): excerpt is non-contiguous — embedded line numbers jump,
@ so instructions in the gaps are not visible here.
544 ARM_FUNC_START muldf3
545 ARM_FUNC_ALIAS aeabi_dmul muldf3
546 stmfd sp!, {r4, r5, r6, lr}
548 @ Mask out exponents, trap any zero/denormal/INF/NAN.
551 ands r4, ip, xh, lsr #20
552 andnes r5, ip, yh, lsr #20
557 @ Add exponents together
560 @ Determine final sign.
563 @ Convert mantissa to unsigned integer.
564 @ If power of two, branch to a separate path.
565 bic xh, xh, ip, lsl #21
566 bic yh, yh, ip, lsl #21
@ A zero mantissa (after stripping sign/exponent) means the operand is
@ an exact power of two; taken to the 1: shortcut further down.
567 orrs r5, xl, xh, lsl #12
568 orrnes r5, yl, yh, lsl #12
@ Restore the implicit leading 1 of each normalized mantissa.
569 orr xh, xh, #0x00100000
570 orr yh, yh, #0x00100000
575 @ Put sign bit in r6, which will be restored in yl later.
576 and r6, r6, #0x80000000
578 @ Well, no way to make it shorter without the umull instruction.
579 stmfd sp!, {r6, r7, r8, r9, sl, fp}
@ Split each 32-bit word into 16-bit halves (high halves in r7-sl,
@ low halves left in xl/yl/xh/yh) for the schoolbook multiply below.
584 bic xl, xl, r7, lsl #16
585 bic yl, yl, r8, lsl #16
586 bic xh, xh, r9, lsl #16
587 bic yh, yh, sl, lsl #16
@ Accumulate the 16x16 partial products into ip-lr-r5-r6
@ (128-bit running sum); each pair of adds/adc lines folds one
@ partial product at its 16-bit-aligned position.
591 adds ip, ip, fp, lsl #16
592 adc lr, lr, fp, lsr #16
594 adds ip, ip, fp, lsl #16
595 adc lr, lr, fp, lsr #16
598 adds lr, lr, fp, lsl #16
599 adc r5, r5, fp, lsr #16
601 adds lr, lr, fp, lsl #16
602 adc r5, r5, fp, lsr #16
604 adds lr, lr, fp, lsl #16
605 adc r5, r5, fp, lsr #16
607 adds lr, lr, fp, lsl #16
608 adc r5, r5, fp, lsr #16
611 adds r5, r5, fp, lsl #16
612 adc r6, r6, fp, lsr #16
614 adds r5, r5, fp, lsl #16
615 adc r6, r6, fp, lsr #16
631 ldmfd sp!, {yl, r7, r8, r9, sl, fp}
635 @ Here is the actual multiplication.
639 and yl, r6, #0x80000000
646 @ The LSBs in ip are only significant for the final rounding.
651 @ Adjust result upon the MSB position.
653 cmp r6, #(1 << (20-11))
660 @ Shift to final position, add sign to result.
661 orr xh, yl, r6, lsl #11
662 orr xh, xh, r5, lsr #21
664 orr xl, xl, lr, lsr #21
667 @ Check exponent range for under/overflow.
668 subs ip, r4, #(254 - 1)
672 @ Round the result, merge final exponent.
@ Round to nearest even: on a halfway case the low result bit decides
@ the carry direction.
674 moveqs lr, xl, lsr #1
676 adc xh, xh, r4, lsl #20
679 @ Multiplication by 0x1p*: let's shortcut a lot of code.
681 and r6, r6, #0x80000000
685 subs r4, r4, ip, lsr #1
687 orrgt xh, xh, r4, lsl #20
688 RETLDM "r4, r5, r6" gt
690 @ Under/overflow: fix things up for the code below.
691 orr xh, xh, #0x00100000
699 @ Check if denormalized result is possible, otherwise return signed 0.
702 bicle xh, xh, #0x7fffffff
703 RETLDM "r4, r5, r6" le
705 @ Find out proper shift value.
712 @ shift result right of 1 to 20 bits, preserve sign bit, round, etc.
717 orr xl, xl, xh, lsl r5
718 and r2, xh, #0x80000000
719 bic xh, xh, #0x80000000
720 adds xl, xl, r3, lsr #31
721 adc xh, r2, xh, lsr r4
@ If the discarded bits were exactly 0.5ulp, clear the rounding carry
@ to force round-to-even.
722 orrs lr, lr, r3, lsl #1
723 biceq xl, xl, r3, lsr #31
726 @ shift result right of 21 to 31 bits, or left 11 to 1 bits after
727 @ a register switch from xh to xl. Then round.
732 orr xl, xl, xh, lsl r4
733 bic xh, xh, #0x7fffffff
734 adds xl, xl, r3, lsr #31
736 orrs lr, lr, r3, lsl #1
737 biceq xl, xl, r3, lsr #31
740 @ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
741 @ from xh to xl. Leftover bits are in r3-r6-lr for rounding.
743 orr lr, lr, xl, lsl r5
745 orr r3, r3, xh, lsl r5
747 bic xh, xh, #0x7fffffff
748 bic xl, xl, xh, lsr r4
749 add xl, xl, r3, lsr #31
750 orrs lr, lr, r3, lsl #1
751 biceq xl, xl, r3, lsr #31
754 @ One or both arguments are denormalized.
755 @ Scale them leftwards and preserve sign bit.
759 and r6, xh, #0x80000000
761 1: movs xl, xl, lsl #1
768 2: and r6, yh, #0x80000000
769 3: movs yl, yl, lsl #1
778 @ Isolate the INF and NAN cases away
780 and r5, ip, yh, lsr #20
784 @ Here, one or more arguments are either denormalized or zero.
785 orrs r6, xl, xh, lsl #1
786 orrnes r6, yl, yh, lsl #1
789 @ Result is 0, but determine sign anyway.
792 bic xh, xh, #0x7fffffff
796 1: @ One or both args are INF or NAN.
797 orrs r6, xl, xh, lsl #1
800 orrnes r6, yl, yh, lsl #1
801 beq LSYM(Lml_n) @ 0 * INF or INF * 0 -> NAN
804 orrs r6, xl, xh, lsl #12
805 bne LSYM(Lml_n) @ NAN * <anything> -> NAN
808 orrs r6, yl, yh, lsl #12
811 bne LSYM(Lml_n) @ <anything> * NAN -> NAN
813 @ Result is INF, but we need to determine its sign.
817 @ Overflow: return INF (sign already in xh).
819 and xh, xh, #0x80000000
820 orr xh, xh, #0x7f000000
821 orr xh, xh, #0x00f00000
825 @ Return a quiet NAN.
@ 0x7ff80000:00000000 is the canonical quiet NAN returned here.
827 orr xh, xh, #0x7f000000
828 orr xh, xh, #0x00f80000
@ divdf3 / __aeabi_ddiv: double-precision division.
@ Strategy: subtract exponents, then perform a long division of the
@ mantissas generating result bits a few at a time (nibble-aligned),
@ then round and pack. Special operands (zero/denormal/INF/NAN) are
@ dispatched to the shared Lml_* tails of muldf3.
@ NOTE(review): excerpt is non-contiguous — embedded line numbers jump,
@ so instructions in the gaps are not visible here.
834 ARM_FUNC_START divdf3
835 ARM_FUNC_ALIAS aeabi_ddiv divdf3
837 stmfd sp!, {r4, r5, r6, lr}
839 @ Mask out exponents, trap any zero/denormal/INF/NAN.
842 ands r4, ip, xh, lsr #20
843 andnes r5, ip, yh, lsr #20
848 @ Subtract divisor exponent from dividend's.
851 @ Preserve final sign into lr.
854 @ Convert mantissa to unsigned integer.
855 @ Dividend -> r5-r6, divisor -> yh-yl.
856 orrs r5, yl, yh, lsl #12
861 orr yh, r5, yh, lsr #4
862 orr yh, yh, yl, lsr #24
864 orr r5, r5, xh, lsr #4
865 orr r5, r5, xl, lsr #24
868 @ Initialize xh with final sign bit.
869 and xh, lr, #0x80000000
871 @ Ensure result will land to known bit position.
872 @ Apply exponent bias accordingly.
875 adc r4, r4, #(255 - 2)
881 @ Perform first subtraction to align result to a nibble.
889 @ The actual division loop.
@ Each step tries divisor shifted right by 1, 2, 3 bits; a successful
@ (carry-set) subtract contributes the corresponding result bit.
901 orrcs xl, xl, ip, lsr #1
908 orrcs xl, xl, ip, lsr #2
915 orrcs xl, xl, ip, lsr #3
920 orr r5, r5, r6, lsr #28
923 orr yh, yh, yl, lsr #29
928 @ We are done with a word of the result.
929 @ Loop again for the low word if this pass was for the high word.
937 @ Be sure result starts in the high word.
942 @ Check exponent range for under/overflow.
943 subs ip, r4, #(254 - 1)
947 @ Round the result, merge final exponent.
@ Round-to-nearest-even tie handling, as in adddf3/muldf3.
950 moveqs ip, xl, lsr #1
952 adc xh, xh, r4, lsl #20
955 @ Division by 0x1p*: shortcut a lot of code.
957 and lr, lr, #0x80000000
958 orr xh, lr, xh, lsr #12
959 adds r4, r4, ip, lsr #1
961 orrgt xh, xh, r4, lsl #20
962 RETLDM "r4, r5, r6" gt
964 orr xh, xh, #0x00100000
969 @ Result might need to be denormalized: put remainder bits
970 @ in lr for rounding considerations.
975 @ One or both arguments is either INF, NAN or zero.
977 and r5, ip, yh, lsr #20
980 beq LSYM(Lml_n) @ INF/NAN / INF/NAN -> NAN
983 orrs r4, xl, xh, lsl #12
984 bne LSYM(Lml_n) @ NAN / <anything> -> NAN
986 bne LSYM(Lml_i) @ INF / <anything> -> INF
989 b LSYM(Lml_n) @ INF / (INF or NAN) -> NAN
992 orrs r5, yl, yh, lsl #12
993 beq LSYM(Lml_z) @ <anything> / INF -> 0
996 b LSYM(Lml_n) @ <anything> / NAN -> NAN
997 2: @ If both are nonzero, we need to normalize and resume above.
998 orrs r6, xl, xh, lsl #1
999 orrnes r6, yl, yh, lsl #1
1001 @ One or both arguments are 0.
1002 orrs r4, xl, xh, lsl #1
1003 bne LSYM(Lml_i) @ <non_zero> / 0 -> INF
1004 orrs r5, yl, yh, lsl #1
1005 bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
1006 b LSYM(Lml_n) @ 0 / 0 -> NAN
1011 #endif /* L_muldivdf3 */
@ cmpdf2 / gtdf2 / gedf2 / ltdf2 / ledf2 / nedf2 / eqdf2:
@ three-way double comparison returning <0, 0 or >0 in r0, with a
@ per-entry-point "unordered" return code stashed on the stack (each
@ entry point loads a different ip value before falling into the
@ common body at 1:).
1015 @ Note: only r0 (return value) and ip are clobbered here.
1017 ARM_FUNC_START gtdf2
1018 ARM_FUNC_ALIAS gedf2 gtdf2
1022 ARM_FUNC_START ltdf2
1023 ARM_FUNC_ALIAS ledf2 ltdf2
1027 ARM_FUNC_START cmpdf2
1028 ARM_FUNC_ALIAS nedf2 cmpdf2
1029 ARM_FUNC_ALIAS eqdf2 cmpdf2
1030 mov ip, #1 @ how should we specify unordered here?
@ Unordered code kept below sp; only valid because nothing between
@ this store and the reload at 5: pushes onto the stack.
1032 1: str ip, [sp, #-4]
1034 @ Trap any INF/NAN first.
1036 mvns ip, ip, asr #21
1038 mvnnes ip, ip, asr #21
1041 @ Test for equality.
1042 @ Note that 0.0 is equal to -0.0.
1043 2: orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0
1044 orreqs ip, yl, yh, lsl #1 @ and y == 0.0 or -0.0
1045 teqne xh, yh @ or xh == yh
1046 teqeq xl, yl @ and xl == yl
1047 moveq r0, #0 @ then equal.
1056 @ Compare values if same sign
1061 movcs r0, yh, asr #31
1062 mvncc r0, yh, asr #31
@ 3:/4: verify that an INF-exponent operand is really INF, not NAN;
@ any NAN sends us to the unordered exit at 5:.
1067 3: mov ip, xh, lsl #1
1068 mvns ip, ip, asr #21
1070 orrs ip, xl, xh, lsl #12
1072 4: mov ip, yh, lsl #1
1073 mvns ip, ip, asr #21
1075 orrs ip, yl, yh, lsl #12
1076 beq 2b @ y is not NAN
1077 5: ldr r0, [sp, #-4] @ unordered return code
@ __aeabi_cdcmple / __aeabi_cdcmpeq / __aeabi_cdrcmple:
@ AEABI flag-returning comparisons — result is delivered in the CPSR
@ (Z and C flags) rather than in r0, and all other registers must be
@ preserved per the ARM RTABI.
1088 ARM_FUNC_START aeabi_cdrcmple
1098 ARM_FUNC_START aeabi_cdcmpeq
1099 ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
1101 @ The status-returning routines are required to preserve all
1102 @ registers except ip, lr, and cpsr.
1103 6: stmfd sp!, {r0, lr}
1105 @ Set the Z flag correctly, and the C flag unconditionally.
1107 @ Clear the C flag if the return value was -1, indicating
1108 @ that the first operand was smaller than the second.
1112 FUNC_END aeabi_cdcmple
1113 FUNC_END aeabi_cdcmpeq
1114 FUNC_END aeabi_cdrcmple
@ __aeabi_dcmpeq/lt/le/ge/gt: boolean (0/1 in r0) double comparisons.
@ Each is a thin wrapper that calls the flag-returning cdcmple or
@ cdrcmple routine and converts the resulting CPSR condition into 0/1.
@ ge/gt use the reversed comparison (cdrcmple) so the same condition
@ codes can be reused.
1116 ARM_FUNC_START aeabi_dcmpeq
1119 ARM_CALL aeabi_cdcmple
1120 moveq r0, #1 @ Equal to.
1121 movne r0, #0 @ Less than, greater than, or unordered.
1124 FUNC_END aeabi_dcmpeq
1126 ARM_FUNC_START aeabi_dcmplt
1129 ARM_CALL aeabi_cdcmple
1130 movcc r0, #1 @ Less than.
1131 movcs r0, #0 @ Equal to, greater than, or unordered.
1134 FUNC_END aeabi_dcmplt
1136 ARM_FUNC_START aeabi_dcmple
1139 ARM_CALL aeabi_cdcmple
1140 movls r0, #1 @ Less than or equal to.
1141 movhi r0, #0 @ Greater than or unordered.
1144 FUNC_END aeabi_dcmple
1146 ARM_FUNC_START aeabi_dcmpge
1149 ARM_CALL aeabi_cdrcmple
1150 movls r0, #1 @ Operand 2 is less than or equal to operand 1.
1151 movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
1154 FUNC_END aeabi_dcmpge
1156 ARM_FUNC_START aeabi_dcmpgt
1159 ARM_CALL aeabi_cdrcmple
1160 movcc r0, #1 @ Operand 2 is less than operand 1.
1161 movcs r0, #0 @ Operand 2 is greater than or equal to operand 1,
1162 @ or they are unordered.
1165 FUNC_END aeabi_dcmpgt
1167 #endif /* L_cmpdf2 */
@ unorddf2 / __aeabi_dcmpun: return 1 if either argument is NAN
@ (i.e. the pair is unordered), 0 otherwise.
@ NAN test: all-ones exponent (mvns ... asr #21 sets EQ) combined with
@ a non-zero mantissa (orrs of the low word and shifted high word).
1171 ARM_FUNC_START unorddf2
1172 ARM_FUNC_ALIAS aeabi_dcmpun unorddf2
1175 mvns ip, ip, asr #21
1177 orrs ip, xl, xh, lsl #12
1179 1: mov ip, yh, lsl #1
1180 mvns ip, ip, asr #21
1182 orrs ip, yl, yh, lsl #12
1184 2: mov r0, #0 @ arguments are ordered.
1187 3: mov r0, #1 @ arguments are unordered.
1190 FUNC_END aeabi_dcmpun
@ fixdfsi / __aeabi_d2iz: convert double to signed 32-bit int,
@ truncating toward zero. Out-of-range values saturate to
@ 0x7fffffff / 0x80000000; NAN converts to 0.
1197 ARM_FUNC_START fixdfsi
1198 ARM_FUNC_ALIAS aeabi_d2iz fixdfsi
1200 @ check exponent range.
1202 adds r2, r2, #(1 << 21)
1203 bcs 2f @ value is INF or NAN
1204 bpl 1f @ value is too small
@ r3 = 31 - unbiased exponent: how far to shift the mantissa right.
1205 mov r3, #(0xfffffc00 + 31)
1206 subs r2, r3, r2, asr #21
1207 bls 3f @ value is too large
1211 orr r3, r3, #0x80000000
1212 orr r3, r3, xl, lsr #21
1213 tst xh, #0x80000000 @ the sign bit
1221 2: orrs xl, xl, xh, lsl #12
1223 3: ands r0, xh, #0x80000000 @ the sign bit
@ Positive overflow saturates to INT_MAX; negative overflow keeps the
@ sign bit already in r0, which (presumably, code in the gap) becomes
@ INT_MIN — TODO confirm against the full source.
1224 moveq r0, #0x7fffffff @ maximum signed positive si
1227 4: mov r0, #0 @ How should we convert NAN?
1233 #endif /* L_fixdfsi */
@ fixunsdfsi / __aeabi_d2uiz: convert double to unsigned 32-bit int,
@ truncating toward zero. Negative or too-small values give 0;
@ too-large values saturate to 0xffffffff; NAN converts to 0.
1237 ARM_FUNC_START fixunsdfsi
1238 ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi
1240 @ check exponent range.
1242 bcs 1f @ value is negative
1243 adds r2, r2, #(1 << 21)
1244 bcs 2f @ value is INF or NAN
1245 bpl 1f @ value is too small
1246 mov r3, #(0xfffffc00 + 31)
1247 subs r2, r3, r2, asr #21
1248 bmi 3f @ value is too large
1252 orr r3, r3, #0x80000000
1253 orr r3, r3, xl, lsr #21
1260 2: orrs xl, xl, xh, lsl #12
1261 bne 4f @ value is NAN.
1262 3: mov r0, #0xffffffff @ maximum unsigned si
1265 4: mov r0, #0 @ How should we convert NAN?
1268 FUNC_END aeabi_d2uiz
1271 #endif /* L_fixunsdfsi */
@ truncdfsf2 / __aeabi_d2f: narrow double to single precision.
@ Rebias the exponent from 1023 to 127, shift the mantissa down 29
@ bits with rounding, and handle overflow (-> INF), underflow
@ (-> denormal or signed 0) and NAN (-> single-precision quiet NAN).
1275 ARM_FUNC_START truncdfsf2
1276 ARM_FUNC_ALIAS aeabi_d2f truncdfsf2
1278 @ check exponent range.
1280 subs r3, r2, #((1023 - 127) << 21)
1281 subcss ip, r3, #(1 << 21)
1282 rsbcss ip, ip, #(254 << 21)
1283 bls 2f @ value is out of range
1285 1: @ shift and round mantissa
1286 and ip, xh, #0x80000000
1288 orr xl, ip, xl, lsr #29
@ adc folds the rounding carry into the packed result in one step.
1290 adc r0, xl, r3, lsl #2
1294 2: @ either overflow or underflow
1298 @ check if denormalized value is possible
1299 adds r2, r3, #(23 << 21)
1300 andlt r0, xh, #0x80000000 @ too small, return signed 0.
1303 @ denormalize value so we can resume with the code above afterwards.
1304 orr xh, xh, #0x00100000
1310 orrne xl, xl, #1 @ fold r3 for rounding considerations.
1313 orr xl, xl, r3, lsl ip
@ Distinguish overflow from NAN: all-ones exponent + non-zero
@ mantissa means NAN; return the single-precision quiet NAN
@ 0x7fc00000, else signed INF.
1319 mvns r3, r2, asr #21
1320 bne 5f @ simple overflow
1321 orrs r3, xl, xh, lsl #12
1322 movne r0, #0x7f000000
1323 orrne r0, r0, #0x00c00000
1324 RETc(ne) @ return NAN
1326 5: @ return INF with sign
1327 and r0, xh, #0x80000000
1328 orr r0, r0, #0x7f000000
1329 orr r0, r0, #0x00800000
1335 #endif /* L_truncdfsf2 */