/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* Host return address of the currently executing helper; used to locate
   the guest PC for precise exception restart (see do_restore_state). */
#define GETPC() (__builtin_return_address(0))
25 /*****************************************************************************/
26 /* Exceptions processing helpers */
28 void do_raise_exception_err (uint32_t exception
, int error_code
)
31 if (logfile
&& exception
< 0x100)
32 fprintf(logfile
, "%s: %d %d\n", __func__
, exception
, error_code
);
34 env
->exception_index
= exception
;
35 env
->error_code
= error_code
;
/* Raise an exception with no auxiliary error code. */
void do_raise_exception (uint32_t exception)
{
    do_raise_exception_err(exception, 0);
}
45 void do_restore_state (void *pc_ptr
)
48 unsigned long pc
= (unsigned long) pc_ptr
;
51 cpu_restore_state (tb
, env
, pc
, NULL
);
/* Raise an exception from inside a helper: first restore the guest PC
   from our own return address, then raise. */
void do_raise_exception_direct_err (uint32_t exception, int error_code)
{
    do_restore_state (GETPC ());
    do_raise_exception_err (exception, error_code);
}
/* Direct-raise variant with no auxiliary error code. */
void do_raise_exception_direct (uint32_t exception)
{
    do_raise_exception_direct_err (exception, 0);
}
65 #define MEMSUFFIX _raw
66 #include "op_helper_mem.c"
68 #if !defined(CONFIG_USER_ONLY)
69 #define MEMSUFFIX _user
70 #include "op_helper_mem.c"
72 #define MEMSUFFIX _kernel
73 #include "op_helper_mem.c"
77 #if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
78 #if TARGET_LONG_BITS > HOST_LONG_BITS
79 /* Those might call libgcc functions. */
92 T0
= (int64_t)T0
>> T1
;
97 T0
= (int64_t)T0
>> (T1
+ 32);
105 void do_dsrl32 (void)
107 T0
= T0
>> (T1
+ 32);
115 tmp
= T0
<< (0x40 - T1
);
116 T0
= (T0
>> T1
) | tmp
;
120 void do_drotr32 (void)
125 tmp
= T0
<< (0x40 - (32 + T1
));
126 T0
= (T0
>> (32 + T1
)) | tmp
;
132 T0
= T1
<< (T0
& 0x3F);
137 T0
= (int64_t)T1
>> (T0
& 0x3F);
142 T0
= T1
>> (T0
& 0x3F);
145 void do_drotrv (void)
151 tmp
= T1
<< (0x40 - T0
);
152 T0
= (T1
>> T0
) | tmp
;
156 #endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
157 #endif /* TARGET_MIPSN32 || TARGET_MIPS64 */
159 /* 64 bits arithmetic for 32 bits hosts */
160 #if TARGET_LONG_BITS > HOST_LONG_BITS
161 static inline uint64_t get_HILO (void)
163 return (env
->HI
[0][env
->current_tc
] << 32) | (uint32_t)env
->LO
[0][env
->current_tc
];
166 static inline void set_HILO (uint64_t HILO
)
168 env
->LO
[0][env
->current_tc
] = (int32_t)HILO
;
169 env
->HI
[0][env
->current_tc
] = (int32_t)(HILO
>> 32);
174 set_HILO((int64_t)(int32_t)T0
* (int64_t)(int32_t)T1
);
179 set_HILO((uint64_t)(uint32_t)T0
* (uint64_t)(uint32_t)T1
);
186 tmp
= ((int64_t)(int32_t)T0
* (int64_t)(int32_t)T1
);
187 set_HILO((int64_t)get_HILO() + tmp
);
194 tmp
= ((uint64_t)(uint32_t)T0
* (uint64_t)(uint32_t)T1
);
195 set_HILO(get_HILO() + tmp
);
202 tmp
= ((int64_t)(int32_t)T0
* (int64_t)(int32_t)T1
);
203 set_HILO((int64_t)get_HILO() - tmp
);
210 tmp
= ((uint64_t)(uint32_t)T0
* (uint64_t)(uint32_t)T1
);
211 set_HILO(get_HILO() - tmp
);
215 #if HOST_LONG_BITS < 64
218 /* 64bit datatypes because we may see overflow/underflow. */
220 env
->LO
[0][env
->current_tc
] = (int32_t)((int64_t)(int32_t)T0
/ (int32_t)T1
);
221 env
->HI
[0][env
->current_tc
] = (int32_t)((int64_t)(int32_t)T0
% (int32_t)T1
);
226 #if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
230 lldiv_t res
= lldiv((int64_t)T0
, (int64_t)T1
);
231 env
->LO
[0][env
->current_tc
] = res
.quot
;
232 env
->HI
[0][env
->current_tc
] = res
.rem
;
236 #if TARGET_LONG_BITS > HOST_LONG_BITS
240 env
->LO
[0][env
->current_tc
] = T0
/ T1
;
241 env
->HI
[0][env
->current_tc
] = T0
% T1
;
245 #endif /* TARGET_MIPSN32 || TARGET_MIPS64 */
247 #if defined(CONFIG_USER_ONLY)
248 void do_mfc0_random (void)
250 cpu_abort(env
, "mfc0 random\n");
253 void do_mfc0_count (void)
255 cpu_abort(env
, "mfc0 count\n");
258 void cpu_mips_store_count(CPUState
*env
, uint32_t value
)
260 cpu_abort(env
, "mtc0 count\n");
263 void cpu_mips_store_compare(CPUState
*env
, uint32_t value
)
265 cpu_abort(env
, "mtc0 compare\n");
268 void cpu_mips_start_count(CPUState
*env
)
270 cpu_abort(env
, "start count\n");
273 void cpu_mips_stop_count(CPUState
*env
)
275 cpu_abort(env
, "stop count\n");
278 void cpu_mips_update_irq(CPUState
*env
)
280 cpu_abort(env
, "mtc0 status / mtc0 cause\n");
283 void do_mtc0_status_debug(uint32_t old
, uint32_t val
)
285 cpu_abort(env
, "mtc0 status debug\n");
288 void do_mtc0_status_irqraise_debug (void)
290 cpu_abort(env
, "mtc0 status irqraise debug\n");
293 void cpu_mips_tlb_flush (CPUState
*env
, int flush_global
)
295 cpu_abort(env
, "mips_tlb_flush\n");
301 void do_mfc0_random (void)
303 T0
= (int32_t)cpu_mips_get_random(env
);
306 void do_mfc0_count (void)
308 T0
= (int32_t)cpu_mips_get_count(env
);
311 void do_mtc0_status_debug(uint32_t old
, uint32_t val
)
313 fprintf(logfile
, "Status %08x (%08x) => %08x (%08x) Cause %08x",
314 old
, old
& env
->CP0_Cause
& CP0Ca_IP_mask
,
315 val
, val
& env
->CP0_Cause
& CP0Ca_IP_mask
,
317 (env
->hflags
& MIPS_HFLAG_UM
) ? fputs(", UM\n", logfile
)
318 : fputs("\n", logfile
);
321 void do_mtc0_status_irqraise_debug(void)
323 fprintf(logfile
, "Raise pending IRQs\n");
326 void fpu_handle_exception(void)
328 #ifdef CONFIG_SOFTFLOAT
329 int flags
= get_float_exception_flags(&env
->fpu
->fp_status
);
330 unsigned int cpuflags
= 0, enable
, cause
= 0;
332 enable
= GET_FP_ENABLE(env
->fpu
->fcr31
);
334 /* determine current flags */
335 if (flags
& float_flag_invalid
) {
336 cpuflags
|= FP_INVALID
;
337 cause
|= FP_INVALID
& enable
;
339 if (flags
& float_flag_divbyzero
) {
341 cause
|= FP_DIV0
& enable
;
343 if (flags
& float_flag_overflow
) {
344 cpuflags
|= FP_OVERFLOW
;
345 cause
|= FP_OVERFLOW
& enable
;
347 if (flags
& float_flag_underflow
) {
348 cpuflags
|= FP_UNDERFLOW
;
349 cause
|= FP_UNDERFLOW
& enable
;
351 if (flags
& float_flag_inexact
) {
352 cpuflags
|= FP_INEXACT
;
353 cause
|= FP_INEXACT
& enable
;
355 SET_FP_FLAGS(env
->fpu
->fcr31
, cpuflags
);
356 SET_FP_CAUSE(env
->fpu
->fcr31
, cause
);
358 SET_FP_FLAGS(env
->fpu
->fcr31
, 0);
359 SET_FP_CAUSE(env
->fpu
->fcr31
, 0);
364 void cpu_mips_tlb_flush (CPUState
*env
, int flush_global
)
366 /* Flush qemu's TLB and discard all shadowed entries. */
367 tlb_flush (env
, flush_global
);
368 env
->tlb
->tlb_in_use
= env
->tlb
->nb_tlb
;
371 static void r4k_mips_tlb_flush_extra (CPUState
*env
, int first
)
373 /* Discard entries from env->tlb[first] onwards. */
374 while (env
->tlb
->tlb_in_use
> first
) {
375 r4k_invalidate_tlb(env
, --env
->tlb
->tlb_in_use
, 0);
379 static void r4k_fill_tlb (int idx
)
383 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
384 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
385 tlb
->VPN
= env
->CP0_EntryHi
& (TARGET_PAGE_MASK
<< 1);
386 #if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
387 tlb
->VPN
&= env
->SEGMask
;
389 tlb
->ASID
= env
->CP0_EntryHi
& 0xFF;
390 tlb
->PageMask
= env
->CP0_PageMask
;
391 tlb
->G
= env
->CP0_EntryLo0
& env
->CP0_EntryLo1
& 1;
392 tlb
->V0
= (env
->CP0_EntryLo0
& 2) != 0;
393 tlb
->D0
= (env
->CP0_EntryLo0
& 4) != 0;
394 tlb
->C0
= (env
->CP0_EntryLo0
>> 3) & 0x7;
395 tlb
->PFN
[0] = (env
->CP0_EntryLo0
>> 6) << 12;
396 tlb
->V1
= (env
->CP0_EntryLo1
& 2) != 0;
397 tlb
->D1
= (env
->CP0_EntryLo1
& 4) != 0;
398 tlb
->C1
= (env
->CP0_EntryLo1
>> 3) & 0x7;
399 tlb
->PFN
[1] = (env
->CP0_EntryLo1
>> 6) << 12;
402 void r4k_do_tlbwi (void)
404 /* Discard cached TLB entries. We could avoid doing this if the
405 tlbwi is just upgrading access permissions on the current entry;
406 that might be a further win. */
407 r4k_mips_tlb_flush_extra (env
, env
->tlb
->nb_tlb
);
409 r4k_invalidate_tlb(env
, env
->CP0_Index
% env
->tlb
->nb_tlb
, 0);
410 r4k_fill_tlb(env
->CP0_Index
% env
->tlb
->nb_tlb
);
413 void r4k_do_tlbwr (void)
415 int r
= cpu_mips_get_random(env
);
417 r4k_invalidate_tlb(env
, r
, 1);
421 void r4k_do_tlbp (void)
430 ASID
= env
->CP0_EntryHi
& 0xFF;
431 for (i
= 0; i
< env
->tlb
->nb_tlb
; i
++) {
432 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[i
];
433 /* 1k pages are not supported. */
434 mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
435 tag
= env
->CP0_EntryHi
& ~mask
;
436 VPN
= tlb
->VPN
& ~mask
;
437 /* Check ASID, virtual page number & size */
438 if ((tlb
->G
== 1 || tlb
->ASID
== ASID
) && VPN
== tag
) {
444 if (i
== env
->tlb
->nb_tlb
) {
445 /* No match. Discard any shadow entries, if any of them match. */
446 for (i
= env
->tlb
->nb_tlb
; i
< env
->tlb
->tlb_in_use
; i
++) {
447 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[i
];
448 /* 1k pages are not supported. */
449 mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
450 tag
= env
->CP0_EntryHi
& ~mask
;
451 VPN
= tlb
->VPN
& ~mask
;
452 /* Check ASID, virtual page number & size */
453 if ((tlb
->G
== 1 || tlb
->ASID
== ASID
) && VPN
== tag
) {
454 r4k_mips_tlb_flush_extra (env
, i
);
459 env
->CP0_Index
|= 0x80000000;
463 void r4k_do_tlbr (void)
468 ASID
= env
->CP0_EntryHi
& 0xFF;
469 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[env
->CP0_Index
% env
->tlb
->nb_tlb
];
471 /* If this will change the current ASID, flush qemu's TLB. */
472 if (ASID
!= tlb
->ASID
)
473 cpu_mips_tlb_flush (env
, 1);
475 r4k_mips_tlb_flush_extra(env
, env
->tlb
->nb_tlb
);
477 env
->CP0_EntryHi
= tlb
->VPN
| tlb
->ASID
;
478 env
->CP0_PageMask
= tlb
->PageMask
;
479 env
->CP0_EntryLo0
= tlb
->G
| (tlb
->V0
<< 1) | (tlb
->D0
<< 2) |
480 (tlb
->C0
<< 3) | (tlb
->PFN
[0] >> 6);
481 env
->CP0_EntryLo1
= tlb
->G
| (tlb
->V1
<< 1) | (tlb
->D1
<< 2) |
482 (tlb
->C1
<< 3) | (tlb
->PFN
[1] >> 6);
485 #endif /* !CONFIG_USER_ONLY */
487 void dump_ldst (const unsigned char *func
)
490 fprintf(logfile
, "%s => " TARGET_FMT_lx
" " TARGET_FMT_lx
"\n", __func__
, T0
, T1
);
496 fprintf(logfile
, "%s " TARGET_FMT_lx
" at " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n", __func__
,
497 T1
, T0
, env
->CP0_LLAddr
);
501 void debug_pre_eret (void)
503 fprintf(logfile
, "ERET: PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
,
504 env
->PC
[env
->current_tc
], env
->CP0_EPC
);
505 if (env
->CP0_Status
& (1 << CP0St_ERL
))
506 fprintf(logfile
, " ErrorEPC " TARGET_FMT_lx
, env
->CP0_ErrorEPC
);
507 if (env
->hflags
& MIPS_HFLAG_DM
)
508 fprintf(logfile
, " DEPC " TARGET_FMT_lx
, env
->CP0_DEPC
);
509 fputs("\n", logfile
);
512 void debug_post_eret (void)
514 fprintf(logfile
, " => PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
,
515 env
->PC
[env
->current_tc
], env
->CP0_EPC
);
516 if (env
->CP0_Status
& (1 << CP0St_ERL
))
517 fprintf(logfile
, " ErrorEPC " TARGET_FMT_lx
, env
->CP0_ErrorEPC
);
518 if (env
->hflags
& MIPS_HFLAG_DM
)
519 fprintf(logfile
, " DEPC " TARGET_FMT_lx
, env
->CP0_DEPC
);
520 if (env
->hflags
& MIPS_HFLAG_UM
)
521 fputs(", UM\n", logfile
);
523 fputs("\n", logfile
);
526 void do_pmon (int function
)
530 case 2: /* TODO: char inbyte(int waitflag); */
531 if (env
->gpr
[4][env
->current_tc
] == 0)
532 env
->gpr
[2][env
->current_tc
] = -1;
534 case 11: /* TODO: char inbyte (void); */
535 env
->gpr
[2][env
->current_tc
] = -1;
539 printf("%c", (char)(env
->gpr
[4][env
->current_tc
] & 0xFF));
545 unsigned char *fmt
= (void *)(unsigned long)env
->gpr
[4][env
->current_tc
];
552 #if !defined(CONFIG_USER_ONLY)
554 static void do_unaligned_access (target_ulong addr
, int is_write
, int is_user
, void *retaddr
);
556 #define MMUSUFFIX _mmu
560 #include "softmmu_template.h"
563 #include "softmmu_template.h"
566 #include "softmmu_template.h"
569 #include "softmmu_template.h"
571 static void do_unaligned_access (target_ulong addr
, int is_write
, int is_user
, void *retaddr
)
573 env
->CP0_BadVAddr
= addr
;
574 do_restore_state (retaddr
);
575 do_raise_exception ((is_write
== 1) ? EXCP_AdES
: EXCP_AdEL
);
578 void tlb_fill (target_ulong addr
, int is_write
, int is_user
, void *retaddr
)
580 TranslationBlock
*tb
;
585 /* XXX: hack to restore env in all cases, even if not called from
588 env
= cpu_single_env
;
589 ret
= cpu_mips_handle_mmu_fault(env
, addr
, is_write
, is_user
, 1);
592 /* now we have a real cpu fault */
593 pc
= (unsigned long)retaddr
;
596 /* the PC is inside the translated code. It means that we have
597 a virtual CPU fault */
598 cpu_restore_state(tb
, env
, pc
, NULL
);
601 do_raise_exception_err(env
->exception_index
, env
->error_code
);
/* Complex FPU operations which may need stack space. */

/* IEEE-754 bit patterns.  Sign masks use unsigned literals: 1 << 31
   on a 32-bit int is undefined behavior (signed overflow). */
#define FLOAT_SIGN32 (1U << 31)
#define FLOAT_SIGN64 (1ULL << 63)
#define FLOAT_ONE32 (0x3f8 << 20)
#define FLOAT_ONE64 (0x3ffULL << 52)
#define FLOAT_TWO32 (1 << 30)
#define FLOAT_TWO64 (1ULL << 62)
#define FLOAT_QNAN32 0x7fbfffff
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
#define FLOAT_SNAN32 0x7fffffff
#define FLOAT_SNAN64 0x7fffffffffffffffULL
621 /* convert MIPS rounding mode in FCR31 to IEEE library */
622 unsigned int ieee_rm
[] = {
623 float_round_nearest_even
,
629 #define RESTORE_ROUNDING_MODE \
630 set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)
632 void do_cfc1 (int reg
)
636 T0
= (int32_t)env
->fpu
->fcr0
;
639 T0
= ((env
->fpu
->fcr31
>> 24) & 0xfe) | ((env
->fpu
->fcr31
>> 23) & 0x1);
642 T0
= env
->fpu
->fcr31
& 0x0003f07c;
645 T0
= (env
->fpu
->fcr31
& 0x00000f83) | ((env
->fpu
->fcr31
>> 22) & 0x4);
648 T0
= (int32_t)env
->fpu
->fcr31
;
653 void do_ctc1 (int reg
)
659 env
->fpu
->fcr31
= (env
->fpu
->fcr31
& 0x017fffff) | ((T0
& 0xfe) << 24) |
665 env
->fpu
->fcr31
= (env
->fpu
->fcr31
& 0xfffc0f83) | (T0
& 0x0003f07c);
670 env
->fpu
->fcr31
= (env
->fpu
->fcr31
& 0xfefff07c) | (T0
& 0x00000f83) |
676 env
->fpu
->fcr31
= T0
;
681 /* set rounding mode */
682 RESTORE_ROUNDING_MODE
;
683 set_float_exception_flags(0, &env
->fpu
->fp_status
);
684 if ((GET_FP_ENABLE(env
->fpu
->fcr31
) | 0x20) & GET_FP_CAUSE(env
->fpu
->fcr31
))
685 do_raise_exception(EXCP_FPE
);
688 inline char ieee_ex_to_mips(char xcpt
)
690 return (xcpt
& float_flag_inexact
) >> 5 |
691 (xcpt
& float_flag_underflow
) >> 3 |
692 (xcpt
& float_flag_overflow
) >> 1 |
693 (xcpt
& float_flag_divbyzero
) << 1 |
694 (xcpt
& float_flag_invalid
) << 4;
697 inline char mips_ex_to_ieee(char xcpt
)
699 return (xcpt
& FP_INEXACT
) << 5 |
700 (xcpt
& FP_UNDERFLOW
) << 3 |
701 (xcpt
& FP_OVERFLOW
) << 1 |
702 (xcpt
& FP_DIV0
) >> 1 |
703 (xcpt
& FP_INVALID
) >> 4;
706 inline void update_fcr31(void)
708 int tmp
= ieee_ex_to_mips(get_float_exception_flags(&env
->fpu
->fp_status
));
710 SET_FP_CAUSE(env
->fpu
->fcr31
, tmp
);
711 if (GET_FP_ENABLE(env
->fpu
->fcr31
) & tmp
)
712 do_raise_exception(EXCP_FPE
);
714 UPDATE_FP_FLAGS(env
->fpu
->fcr31
, tmp
);
/* Declare an FP helper named do_float_<name>_<fmt> (fmt: s, d, w, l, ps). */
#define FLOAT_OP(name, p) void do_float_##name##_##p(void)
721 set_float_exception_flags(0, &env
->fpu
->fp_status
);
722 FDT2
= float32_to_float64(FST0
, &env
->fpu
->fp_status
);
727 set_float_exception_flags(0, &env
->fpu
->fp_status
);
728 FDT2
= int32_to_float64(WT0
, &env
->fpu
->fp_status
);
733 set_float_exception_flags(0, &env
->fpu
->fp_status
);
734 FDT2
= int64_to_float64(DT0
, &env
->fpu
->fp_status
);
739 set_float_exception_flags(0, &env
->fpu
->fp_status
);
740 DT2
= float64_to_int64(FDT0
, &env
->fpu
->fp_status
);
742 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
747 set_float_exception_flags(0, &env
->fpu
->fp_status
);
748 DT2
= float32_to_int64(FST0
, &env
->fpu
->fp_status
);
750 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
756 set_float_exception_flags(0, &env
->fpu
->fp_status
);
757 FST2
= int32_to_float32(WT0
, &env
->fpu
->fp_status
);
758 FSTH2
= int32_to_float32(WTH0
, &env
->fpu
->fp_status
);
763 set_float_exception_flags(0, &env
->fpu
->fp_status
);
764 WT2
= float32_to_int32(FST0
, &env
->fpu
->fp_status
);
765 WTH2
= float32_to_int32(FSTH0
, &env
->fpu
->fp_status
);
767 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
772 set_float_exception_flags(0, &env
->fpu
->fp_status
);
773 FST2
= float64_to_float32(FDT0
, &env
->fpu
->fp_status
);
778 set_float_exception_flags(0, &env
->fpu
->fp_status
);
779 FST2
= int32_to_float32(WT0
, &env
->fpu
->fp_status
);
784 set_float_exception_flags(0, &env
->fpu
->fp_status
);
785 FST2
= int64_to_float32(DT0
, &env
->fpu
->fp_status
);
790 set_float_exception_flags(0, &env
->fpu
->fp_status
);
796 set_float_exception_flags(0, &env
->fpu
->fp_status
);
802 set_float_exception_flags(0, &env
->fpu
->fp_status
);
803 WT2
= float32_to_int32(FST0
, &env
->fpu
->fp_status
);
805 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
810 set_float_exception_flags(0, &env
->fpu
->fp_status
);
811 WT2
= float64_to_int32(FDT0
, &env
->fpu
->fp_status
);
813 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
819 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu
->fp_status
);
820 DT2
= float64_to_int64(FDT0
, &env
->fpu
->fp_status
);
821 RESTORE_ROUNDING_MODE
;
823 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
828 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu
->fp_status
);
829 DT2
= float32_to_int64(FST0
, &env
->fpu
->fp_status
);
830 RESTORE_ROUNDING_MODE
;
832 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
837 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu
->fp_status
);
838 WT2
= float64_to_int32(FDT0
, &env
->fpu
->fp_status
);
839 RESTORE_ROUNDING_MODE
;
841 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
846 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu
->fp_status
);
847 WT2
= float32_to_int32(FST0
, &env
->fpu
->fp_status
);
848 RESTORE_ROUNDING_MODE
;
850 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
856 DT2
= float64_to_int64_round_to_zero(FDT0
, &env
->fpu
->fp_status
);
858 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
863 DT2
= float32_to_int64_round_to_zero(FST0
, &env
->fpu
->fp_status
);
865 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
870 WT2
= float64_to_int32_round_to_zero(FDT0
, &env
->fpu
->fp_status
);
872 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
877 WT2
= float32_to_int32_round_to_zero(FST0
, &env
->fpu
->fp_status
);
879 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
885 set_float_rounding_mode(float_round_up
, &env
->fpu
->fp_status
);
886 DT2
= float64_to_int64(FDT0
, &env
->fpu
->fp_status
);
887 RESTORE_ROUNDING_MODE
;
889 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
894 set_float_rounding_mode(float_round_up
, &env
->fpu
->fp_status
);
895 DT2
= float32_to_int64(FST0
, &env
->fpu
->fp_status
);
896 RESTORE_ROUNDING_MODE
;
898 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
903 set_float_rounding_mode(float_round_up
, &env
->fpu
->fp_status
);
904 WT2
= float64_to_int32(FDT0
, &env
->fpu
->fp_status
);
905 RESTORE_ROUNDING_MODE
;
907 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
912 set_float_rounding_mode(float_round_up
, &env
->fpu
->fp_status
);
913 WT2
= float32_to_int32(FST0
, &env
->fpu
->fp_status
);
914 RESTORE_ROUNDING_MODE
;
916 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
922 set_float_rounding_mode(float_round_down
, &env
->fpu
->fp_status
);
923 DT2
= float64_to_int64(FDT0
, &env
->fpu
->fp_status
);
924 RESTORE_ROUNDING_MODE
;
926 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
931 set_float_rounding_mode(float_round_down
, &env
->fpu
->fp_status
);
932 DT2
= float32_to_int64(FST0
, &env
->fpu
->fp_status
);
933 RESTORE_ROUNDING_MODE
;
935 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
940 set_float_rounding_mode(float_round_down
, &env
->fpu
->fp_status
);
941 WT2
= float64_to_int32(FDT0
, &env
->fpu
->fp_status
);
942 RESTORE_ROUNDING_MODE
;
944 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
949 set_float_rounding_mode(float_round_down
, &env
->fpu
->fp_status
);
950 WT2
= float32_to_int32(FST0
, &env
->fpu
->fp_status
);
951 RESTORE_ROUNDING_MODE
;
953 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
957 /* MIPS specific unary operations */
960 set_float_exception_flags(0, &env
->fpu
->fp_status
);
961 FDT2
= float64_div(FLOAT_ONE64
, FDT0
, &env
->fpu
->fp_status
);
966 set_float_exception_flags(0, &env
->fpu
->fp_status
);
967 FST2
= float32_div(FLOAT_ONE32
, FST0
, &env
->fpu
->fp_status
);
973 set_float_exception_flags(0, &env
->fpu
->fp_status
);
974 FDT2
= float64_sqrt(FDT0
, &env
->fpu
->fp_status
);
975 FDT2
= float64_div(FLOAT_ONE64
, FDT2
, &env
->fpu
->fp_status
);
980 set_float_exception_flags(0, &env
->fpu
->fp_status
);
981 FST2
= float32_sqrt(FST0
, &env
->fpu
->fp_status
);
982 FST2
= float32_div(FLOAT_ONE32
, FST2
, &env
->fpu
->fp_status
);
988 set_float_exception_flags(0, &env
->fpu
->fp_status
);
989 FDT2
= float64_div(FLOAT_ONE64
, FDT0
, &env
->fpu
->fp_status
);
994 set_float_exception_flags(0, &env
->fpu
->fp_status
);
995 FST2
= float32_div(FLOAT_ONE32
, FST0
, &env
->fpu
->fp_status
);
1000 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1001 FST2
= float32_div(FLOAT_ONE32
, FST0
, &env
->fpu
->fp_status
);
1002 FSTH2
= float32_div(FLOAT_ONE32
, FSTH0
, &env
->fpu
->fp_status
);
1008 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1009 FDT2
= float64_sqrt(FDT0
, &env
->fpu
->fp_status
);
1010 FDT2
= float64_div(FLOAT_ONE64
, FDT2
, &env
->fpu
->fp_status
);
1015 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1016 FST2
= float32_sqrt(FST0
, &env
->fpu
->fp_status
);
1017 FST2
= float32_div(FLOAT_ONE32
, FST2
, &env
->fpu
->fp_status
);
1020 FLOAT_OP(rsqrt1
, ps
)
1022 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1023 FST2
= float32_sqrt(FST0
, &env
->fpu
->fp_status
);
1024 FSTH2
= float32_sqrt(FSTH0
, &env
->fpu
->fp_status
);
1025 FST2
= float32_div(FLOAT_ONE32
, FST2
, &env
->fpu
->fp_status
);
1026 FSTH2
= float32_div(FLOAT_ONE32
, FSTH2
, &env
->fpu
->fp_status
);
1030 /* binary operations */
1031 #define FLOAT_BINOP(name) \
1034 set_float_exception_flags(0, &env->fpu->fp_status); \
1035 FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status); \
1037 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
1038 FDT2 = FLOAT_QNAN64; \
1042 set_float_exception_flags(0, &env->fpu->fp_status); \
1043 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
1045 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
1046 FST2 = FLOAT_QNAN32; \
1048 FLOAT_OP(name, ps) \
1050 set_float_exception_flags(0, &env->fpu->fp_status); \
1051 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
1052 FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
1054 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) { \
1055 FST2 = FLOAT_QNAN32; \
1056 FSTH2 = FLOAT_QNAN32; \
1065 /* MIPS specific binary operations */
1068 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1069 FDT2
= float64_mul(FDT0
, FDT2
, &env
->fpu
->fp_status
);
1070 FDT2
= float64_sub(FDT2
, FLOAT_ONE64
, &env
->fpu
->fp_status
) ^ FLOAT_SIGN64
;
1075 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1076 FST2
= float32_mul(FST0
, FST2
, &env
->fpu
->fp_status
);
1077 FST2
= float32_sub(FST2
, FLOAT_ONE32
, &env
->fpu
->fp_status
) ^ FLOAT_SIGN32
;
1080 FLOAT_OP(recip2
, ps
)
1082 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1083 FST2
= float32_mul(FST0
, FST2
, &env
->fpu
->fp_status
);
1084 FSTH2
= float32_mul(FSTH0
, FSTH2
, &env
->fpu
->fp_status
);
1085 FST2
= float32_sub(FST2
, FLOAT_ONE32
, &env
->fpu
->fp_status
) ^ FLOAT_SIGN32
;
1086 FSTH2
= float32_sub(FSTH2
, FLOAT_ONE32
, &env
->fpu
->fp_status
) ^ FLOAT_SIGN32
;
1092 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1093 FDT2
= float64_mul(FDT0
, FDT2
, &env
->fpu
->fp_status
);
1094 FDT2
= float64_sub(FDT2
, FLOAT_ONE64
, &env
->fpu
->fp_status
);
1095 FDT2
= float64_div(FDT2
, FLOAT_TWO64
, &env
->fpu
->fp_status
) ^ FLOAT_SIGN64
;
1100 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1101 FST2
= float32_mul(FST0
, FST2
, &env
->fpu
->fp_status
);
1102 FST2
= float32_sub(FST2
, FLOAT_ONE32
, &env
->fpu
->fp_status
);
1103 FST2
= float32_div(FST2
, FLOAT_TWO32
, &env
->fpu
->fp_status
) ^ FLOAT_SIGN32
;
1106 FLOAT_OP(rsqrt2
, ps
)
1108 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1109 FST2
= float32_mul(FST0
, FST2
, &env
->fpu
->fp_status
);
1110 FSTH2
= float32_mul(FSTH0
, FSTH2
, &env
->fpu
->fp_status
);
1111 FST2
= float32_sub(FST2
, FLOAT_ONE32
, &env
->fpu
->fp_status
);
1112 FSTH2
= float32_sub(FSTH2
, FLOAT_ONE32
, &env
->fpu
->fp_status
);
1113 FST2
= float32_div(FST2
, FLOAT_TWO32
, &env
->fpu
->fp_status
) ^ FLOAT_SIGN32
;
1114 FSTH2
= float32_div(FSTH2
, FLOAT_TWO32
, &env
->fpu
->fp_status
) ^ FLOAT_SIGN32
;
1120 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1121 FST2
= float32_add (FST0
, FSTH0
, &env
->fpu
->fp_status
);
1122 FSTH2
= float32_add (FST1
, FSTH1
, &env
->fpu
->fp_status
);
1128 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1129 FST2
= float32_mul (FST0
, FSTH0
, &env
->fpu
->fp_status
);
1130 FSTH2
= float32_mul (FST1
, FSTH1
, &env
->fpu
->fp_status
);
1134 /* compare operations */
1135 #define FOP_COND_D(op, cond) \
1136 void do_cmp_d_ ## op (long cc) \
1141 SET_FP_COND(cc, env->fpu); \
1143 CLEAR_FP_COND(cc, env->fpu); \
1145 void do_cmpabs_d_ ## op (long cc) \
1148 FDT0 &= ~FLOAT_SIGN64; \
1149 FDT1 &= ~FLOAT_SIGN64; \
1153 SET_FP_COND(cc, env->fpu); \
1155 CLEAR_FP_COND(cc, env->fpu); \
1158 int float64_is_unordered(int sig
, float64 a
, float64 b STATUS_PARAM
)
1160 if (float64_is_signaling_nan(a
) ||
1161 float64_is_signaling_nan(b
) ||
1162 (sig
&& (float64_is_nan(a
) || float64_is_nan(b
)))) {
1163 float_raise(float_flag_invalid
, status
);
1165 } else if (float64_is_nan(a
) || float64_is_nan(b
)) {
1172 /* NOTE: the comma operator will make "cond" to eval to false,
1173 * but float*_is_unordered() is still called. */
1174 FOP_COND_D(f
, (float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
), 0))
1175 FOP_COND_D(un
, float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
))
1176 FOP_COND_D(eq
, !float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
) && float64_eq(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1177 FOP_COND_D(ueq
, float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
) || float64_eq(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1178 FOP_COND_D(olt
, !float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
) && float64_lt(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1179 FOP_COND_D(ult
, float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
) || float64_lt(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1180 FOP_COND_D(ole
, !float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
) && float64_le(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1181 FOP_COND_D(ule
, float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
) || float64_le(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1182 /* NOTE: the comma operator will make "cond" to eval to false,
1183 * but float*_is_unordered() is still called. */
1184 FOP_COND_D(sf
, (float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
), 0))
1185 FOP_COND_D(ngle
,float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
))
1186 FOP_COND_D(seq
, !float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
) && float64_eq(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1187 FOP_COND_D(ngl
, float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
) || float64_eq(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1188 FOP_COND_D(lt
, !float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
) && float64_lt(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1189 FOP_COND_D(nge
, float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
) || float64_lt(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1190 FOP_COND_D(le
, !float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
) && float64_le(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1191 FOP_COND_D(ngt
, float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
) || float64_le(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1193 #define FOP_COND_S(op, cond) \
1194 void do_cmp_s_ ## op (long cc) \
1199 SET_FP_COND(cc, env->fpu); \
1201 CLEAR_FP_COND(cc, env->fpu); \
1203 void do_cmpabs_s_ ## op (long cc) \
1206 FST0 &= ~FLOAT_SIGN32; \
1207 FST1 &= ~FLOAT_SIGN32; \
1211 SET_FP_COND(cc, env->fpu); \
1213 CLEAR_FP_COND(cc, env->fpu); \
1216 flag
float32_is_unordered(int sig
, float32 a
, float32 b STATUS_PARAM
)
1218 if (float32_is_signaling_nan(a
) ||
1219 float32_is_signaling_nan(b
) ||
1220 (sig
&& (float32_is_nan(a
) || float32_is_nan(b
)))) {
1221 float_raise(float_flag_invalid
, status
);
1223 } else if (float32_is_nan(a
) || float32_is_nan(b
)) {
1230 /* NOTE: the comma operator will make "cond" to eval to false,
1231 * but float*_is_unordered() is still called. */
1232 FOP_COND_S(f
, (float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
), 0))
1233 FOP_COND_S(un
, float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
))
1234 FOP_COND_S(eq
, !float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_eq(FST0
, FST1
, &env
->fpu
->fp_status
))
1235 FOP_COND_S(ueq
, float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_eq(FST0
, FST1
, &env
->fpu
->fp_status
))
1236 FOP_COND_S(olt
, !float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_lt(FST0
, FST1
, &env
->fpu
->fp_status
))
1237 FOP_COND_S(ult
, float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_lt(FST0
, FST1
, &env
->fpu
->fp_status
))
1238 FOP_COND_S(ole
, !float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_le(FST0
, FST1
, &env
->fpu
->fp_status
))
1239 FOP_COND_S(ule
, float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_le(FST0
, FST1
, &env
->fpu
->fp_status
))
1240 /* NOTE: the comma operator will make "cond" to eval to false,
1241 * but float*_is_unordered() is still called. */
1242 FOP_COND_S(sf
, (float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
), 0))
1243 FOP_COND_S(ngle
,float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
))
1244 FOP_COND_S(seq
, !float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_eq(FST0
, FST1
, &env
->fpu
->fp_status
))
1245 FOP_COND_S(ngl
, float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_eq(FST0
, FST1
, &env
->fpu
->fp_status
))
1246 FOP_COND_S(lt
, !float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_lt(FST0
, FST1
, &env
->fpu
->fp_status
))
1247 FOP_COND_S(nge
, float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_lt(FST0
, FST1
, &env
->fpu
->fp_status
))
1248 FOP_COND_S(le
, !float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_le(FST0
, FST1
, &env
->fpu
->fp_status
))
1249 FOP_COND_S(ngt
, float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_le(FST0
, FST1
, &env
->fpu
->fp_status
))
1251 #define FOP_COND_PS(op, condl, condh) \
1252 void do_cmp_ps_ ## op (long cc) \
1258 SET_FP_COND(cc, env->fpu); \
1260 CLEAR_FP_COND(cc, env->fpu); \
1262 SET_FP_COND(cc + 1, env->fpu); \
1264 CLEAR_FP_COND(cc + 1, env->fpu); \
1266 void do_cmpabs_ps_ ## op (long cc) \
1269 FST0 &= ~FLOAT_SIGN32; \
1270 FSTH0 &= ~FLOAT_SIGN32; \
1271 FST1 &= ~FLOAT_SIGN32; \
1272 FSTH1 &= ~FLOAT_SIGN32; \
1277 SET_FP_COND(cc, env->fpu); \
1279 CLEAR_FP_COND(cc, env->fpu); \
1281 SET_FP_COND(cc + 1, env->fpu); \
1283 CLEAR_FP_COND(cc + 1, env->fpu); \
1286 /* NOTE: the comma operator will make "cond" to eval to false,
1287 * but float*_is_unordered() is still called. */
1288 FOP_COND_PS(f
, (float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
), 0),
1289 (float32_is_unordered(0, FSTH1
, FSTH0
, &env
->fpu
->fp_status
), 0))
1290 FOP_COND_PS(un
, float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
),
1291 float32_is_unordered(0, FSTH1
, FSTH0
, &env
->fpu
->fp_status
))
1292 FOP_COND_PS(eq
, !float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_eq(FST0
, FST1
, &env
->fpu
->fp_status
),
1293 !float32_is_unordered(0, FSTH1
, FSTH0
, &env
->fpu
->fp_status
) && float32_eq(FSTH0
, FSTH1
, &env
->fpu
->fp_status
))
1294 FOP_COND_PS(ueq
, float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_eq(FST0
, FST1
, &env
->fpu
->fp_status
),
1295 float32_is_unordered(0, FSTH1
, FSTH0
, &env
->fpu
->fp_status
) || float32_eq(FSTH0
, FSTH1
, &env
->fpu
->fp_status
))
1296 FOP_COND_PS(olt
, !float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_lt(FST0
, FST1
, &env
->fpu
->fp_status
),
1297 !float32_is_unordered(0, FSTH1
, FSTH0
, &env
->fpu
->fp_status
) && float32_lt(FSTH0
, FSTH1
, &env
->fpu
->fp_status
))
1298 FOP_COND_PS(ult
, float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_lt(FST0
, FST1
, &env
->fpu
->fp_status
),
1299 float32_is_unordered(0, FSTH1
, FSTH0
, &env
->fpu
->fp_status
) || float32_lt(FSTH0
, FSTH1
, &env
->fpu
->fp_status
))
1300 FOP_COND_PS(ole
, !float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_le(FST0
, FST1
, &env
->fpu
->fp_status
),
1301 !float32_is_unordered(0, FSTH1
, FSTH0
, &env
->fpu
->fp_status
) && float32_le(FSTH0
, FSTH1
, &env
->fpu
->fp_status
))
1302 FOP_COND_PS(ule
, float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_le(FST0
, FST1
, &env
->fpu
->fp_status
),
1303 float32_is_unordered(0, FSTH1
, FSTH0
, &env
->fpu
->fp_status
) || float32_le(FSTH0
, FSTH1
, &env
->fpu
->fp_status
))
1304 /* NOTE: the comma operator will make "cond" to eval to false,
1305 * but float*_is_unordered() is still called. */
1306 FOP_COND_PS(sf
, (float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
), 0),
1307 (float32_is_unordered(1, FSTH1
, FSTH0
, &env
->fpu
->fp_status
), 0))
1308 FOP_COND_PS(ngle
,float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
),
1309 float32_is_unordered(1, FSTH1
, FSTH0
, &env
->fpu
->fp_status
))
1310 FOP_COND_PS(seq
, !float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_eq(FST0
, FST1
, &env
->fpu
->fp_status
),
1311 !float32_is_unordered(1, FSTH1
, FSTH0
, &env
->fpu
->fp_status
) && float32_eq(FSTH0
, FSTH1
, &env
->fpu
->fp_status
))
1312 FOP_COND_PS(ngl
, float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_eq(FST0
, FST1
, &env
->fpu
->fp_status
),
1313 float32_is_unordered(1, FSTH1
, FSTH0
, &env
->fpu
->fp_status
) || float32_eq(FSTH0
, FSTH1
, &env
->fpu
->fp_status
))
1314 FOP_COND_PS(lt
, !float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_lt(FST0
, FST1
, &env
->fpu
->fp_status
),
1315 !float32_is_unordered(1, FSTH1
, FSTH0
, &env
->fpu
->fp_status
) && float32_lt(FSTH0
, FSTH1
, &env
->fpu
->fp_status
))
1316 FOP_COND_PS(nge
, float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_lt(FST0
, FST1
, &env
->fpu
->fp_status
),
1317 float32_is_unordered(1, FSTH1
, FSTH0
, &env
->fpu
->fp_status
) || float32_lt(FSTH0
, FSTH1
, &env
->fpu
->fp_status
))
1318 FOP_COND_PS(le
, !float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_le(FST0
, FST1
, &env
->fpu
->fp_status
),
1319 !float32_is_unordered(1, FSTH1
, FSTH0
, &env
->fpu
->fp_status
) && float32_le(FSTH0
, FSTH1
, &env
->fpu
->fp_status
))
1320 FOP_COND_PS(ngt
, float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_le(FST0
, FST1
, &env
->fpu
->fp_status
),
1321 float32_is_unordered(1, FSTH1
, FSTH0
, &env
->fpu
->fp_status
) || float32_le(FSTH0
, FSTH1
, &env
->fpu
->fp_status
))