| fpsp_unfl --- FPSP handler for underflow exception
|
| Trap disabled results
|
| For 881/2 compatibility, software must denormalize the intermediate
| result, then store the result.  Denormalization is accomplished
| by taking the intermediate result (which is always normalized) and
| shifting the mantissa right while incrementing the exponent until
| it is equal to the denormalized exponent for the destination
| format.  After denormalization, the result is rounded to the
| destination format.
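|
| A worked example (illustrative only, not from the original notes):
| an intermediate result of 1.1 (binary) x 2^-129 destined for single
| precision, whose smallest normalized exponent is -126, is shifted
| right three places to 0.0011 (binary) x 2^-126 and is then rounded
| to the single-precision denormalized format.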
|
| Trap enabled results
|
| All trap disabled code applies.  In addition, the exceptional
| operand needs to be made available to the user with a bias of
| $6000 added to the exponent.
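|
| For example (illustrative only): an intermediate result whose
| extended-precision biased exponent is $0001 would be presented to
| the user's exception handler with a biased exponent of $6001.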
|
|       Copyright (C) Motorola, Inc. 1990
|
| THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
| The copyright notice above does not evidence any
| actual or intended publication of such source code.
X_UNFL: |idnt    2,1 | Motorola 040 Floating Point Software Package
        moveml        %d0-%d1/%a0-%a1,USER_DA(%a6)      |save d0-d1/a0-a1
        fmovemx       %fp0-%fp3,USER_FP0(%a6)           |save fp0-fp3
        fmoveml       %fpcr/%fpsr/%fpiar,USER_FPCR(%a6) |save fpcr/fpsr/fpiar

        bsrl          unf_res       |denormalize, round & store interm op
|
| If underflow exceptions are not enabled, check for the inexact
| exception
|
        btstb         #unfl_bit,FPCR_ENABLE(%a6)        |underflow trap enabled?
|
| Clear dirty bit on dest register in the frame before branching
| to b1238_fix
|
        bfextu        CMDREG3B(%a6){#6:#3},%d0          |get dest reg no
        bclrb         %d0,FPR_DIRTY_BITS(%a6)           |clr dest dirty bit
        bsrl          b1238_fix     |test for bug1238 case
        movel         USER_FPSR(%a6),FPSR_SHADOW(%a6)   |update FPSR shadow copy
        orl           #sx_mask,E_BYTE(%a6)
        moveml        USER_DA(%a6),%d0-%d1/%a0-%a1      |restore d0-d1/a0-a1
        fmovemx       USER_FP0(%a6),%fp0-%fp3           |restore fp0-fp3
        fmoveml       USER_FPCR(%a6),%fpcr/%fpsr/%fpiar |restore fpcr/fpsr/fpiar
|
| It is possible to have either inex2 or inex1 exceptions with the
| unfl.  If the inex enable bit is set in the FPCR, and either
| inex2 or inex1 occurred, we must clean up and branch to the
| real inexact handler.
|
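| Illustration (not part of the original notes): with inex2 enabled in
| the FPCR (enable byte $02) and an exception byte of $0a (unfl + inex2),
| the AND below leaves $02, so the inexact path is taken.
|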
        moveb         FPCR_ENABLE(%a6),%d0              |get enabled exceptions
        andb          FPSR_EXCEPT(%a6),%d0              |and with reported exceptions
|
| Inexact enabled and reported, and we must take an inexact exception
|
| Clear dirty bit on dest register in the frame before branching
| to b1238_fix
|
        bfextu        CMDREG3B(%a6){#6:#3},%d0          |get dest reg no
        bclrb         %d0,FPR_DIRTY_BITS(%a6)           |clr dest dirty bit
        bsrl          b1238_fix     |test for bug1238 case
        movel         USER_FPSR(%a6),FPSR_SHADOW(%a6)
        orl           #sx_mask,E_BYTE(%a6)
        moveb         #INEX_VEC,EXC_VEC+1(%a6)          |set exception vector to inexact
        moveml        USER_DA(%a6),%d0-%d1/%a0-%a1
        fmovemx       USER_FP0(%a6),%fp0-%fp3
        fmoveml       USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
        bclrb         #E3,E_BYTE(%a6)                   |test and clear E3 flag
        beqs          e1_set        |if E3 was clear, E1 is set; branch
|
| Clear dirty bit on dest register in the frame before branching
| to b1238_fix
|
        bfextu        CMDREG3B(%a6){#6:#3},%d0          |get dest reg no
        bclrb         %d0,FPR_DIRTY_BITS(%a6)           |clr dest dirty bit
        bsrl          b1238_fix     |test for bug1238 case
        movel         USER_FPSR(%a6),FPSR_SHADOW(%a6)
        orl           #sx_mask,E_BYTE(%a6)
        moveml        USER_DA(%a6),%d0-%d1/%a0-%a1
        fmovemx       USER_FP0(%a6),%fp0-%fp3
        fmoveml       USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
        moveml        USER_DA(%a6),%d0-%d1/%a0-%a1
        fmovemx       USER_FP0(%a6),%fp0-%fp3
        fmoveml       USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
|
| unf_res --- underflow result calculation
|
        bsrl          g_rndpr       |returns RND_PREC in d0 0=ext,
|                                   ;1=sgl, 2=dbl
|                                   ;we need the RND_PREC in the
|                                   ;upper word for round
        movew         %d0,-(%a7)    |copy RND_PREC to stack
|
| If the exception bit set is E3, the exceptional operand from the
| fpu is in WBTEMP; else it is in FPTEMP.
|
        btstb         #E3,E_BYTE(%a6)                   |test for E3 set
        lea           WBTEMP(%a6),%a0                   |a0 now points to operand
|
| Test for fsgldiv and fsglmul.  If the inst was one of these, then
| force the precision to extended for the denorm routine and to
| single for the round routine (overriding the fpcr precision).
|
        movew         CMDREG3B(%a6),%d1                 |check for fsgldiv or fsglmul
        cmpiw         #0x30,%d1     |check for sgldiv
        cmpiw         #0x33,%d1     |check for sglmul
        bnes          unf_cont      |if not, use fpcr prec in round

        movew         #0x1,(%a7)    |override g_rndpr precision
        lea           FPTEMP(%a6),%a0                   |a0 now points to operand

        bclrb         #sign_bit,LOCAL_EX(%a0)           |clear sign bit
        sne           LOCAL_SGN(%a0)                    |store sign
        bsrl          denorm        |returns denorm, a0 points to it
|                                   ;d0 has the guard, round, and sticky bits
|                                   ;make sure that d0 is not corrupted
|                                   ;before it reaches the round subroutine
|                                   ;also ensure that a0 isn't corrupted
|
| Set up d1 for the round subroutine; d1 contains the PREC/MODE
| information in its upper/lower halves, respectively.
|
        bfextu        FPCR_MODE(%a6){#2:#2},%d1         |get mode from FPCR
        addl          (%a7)+,%d1    |merge PREC/MODE
|
| WARNING: a0 and d0 are assumed to be intact between the denorm and
| round subroutines.  All code between these two subroutines
| must not corrupt a0 and d0.
|
|
| Input:  a0 points to input operand
|         d0{31:29} has guard, round, sticky
|         d1{01:00} has rounding mode
|         d1{17:16} has rounding precision
| Output: a0 points to rounded operand
|
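| For instance (illustrative, not from the original notes): rounding to
| single precision in round-to-nearest mode would be requested with
| d1 = $00010000, i.e. precision 1 in bits 17:16 and mode 0 in bits 1:0.
|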
        bsrl          round         |returns rounded denorm at (a0)
|
| Differentiate between store to memory vs. store to register
|
        bsrl          g_opcls       |returns opclass in d0{2:0}
|
| At this point, a store to memory is pending
|
        beqs          ext_opc011    |If extended, do not subtract
|                                   ;If destination format is sgl/dbl,
        tstb          LOCAL_HI(%a0) |If rounded result is normal, don't
|                                   ;subtract
        subqw         #1,LOCAL_EX(%a0) |account for denorm bias vs.
|                                   ;normalized bias:
|                                   ;          normalized   denormalized
|                                   ;single    $7f          $7e
|                                   ;double    $3ff         $3fe
        bsrl          store         |stores to memory
        bras          unf_done      |finish up
|
| At this point, a store to a float register is pending
|
        bsrl          store         |stores to float register
|                                   ;a0 is not corrupted on a store to a
|                                   ;float register
|
| Set the condition codes according to the result
|
        tstl          LOCAL_HI(%a0) |check upper mantissa
        tstl          LOCAL_LO(%a0) |check lower mantissa
        bsetb         #z_bit,FPSR_CC(%a6)               |set condition codes if zero
        btstb         #sign_bit,LOCAL_EX(%a0)           |check the sign bit
        bsetb         #neg_bit,FPSR_CC(%a6)             |set negative bit
        btstb         #inex2_bit,FPSR_EXCEPT(%a6)       |test for inexact result
        bsetb         #aunfl_bit,FPSR_AEXCEPT(%a6)      |set accrued underflow