milkymist: convert to memory API
[qemu/qmp-unstable.git] / target-mips / op_helper.c
blob056011f1cc31c7b53d21504b2d0e9f4c128809ae
1 /*
2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include "cpu.h"
21 #include "dyngen-exec.h"
23 #include "host-utils.h"
25 #include "helper.h"
27 #if !defined(CONFIG_USER_ONLY)
28 #include "softmmu_exec.h"
29 #endif /* !defined(CONFIG_USER_ONLY) */
31 #ifndef CONFIG_USER_ONLY
32 static inline void cpu_mips_tlb_flush (CPUState *env, int flush_global);
33 #endif
/* Recompute the cached hflags from the architectural CP0 state.
 * The hflags mirror CP0_Status / FCR0 bits that the translator needs
 * on every instruction; call this after any change to CP0_Status,
 * FCR0 or the debug-mode flag.  env is the per-CPU state. */
static inline void compute_hflags(CPUState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_UX);
    /* The effective KSU (kernel/supervisor/user) level comes from
       Status only when no exception level (EXL), error level (ERL) or
       debug mode forces kernel mode. */
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    /* 64-bit operations are available outside user mode, or in user
       mode when Status.PX or Status.UX is set. */
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
        (env->CP0_Status & (1 << CP0St_PX)) ||
        (env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_64;
    }
    if (env->CP0_Status & (1 << CP0St_UX)) {
        env->hflags |= MIPS_HFLAG_UX;
    }
#endif
    /* CP0 is accessible with Status.CU0 set, or whenever not in user
       mode (KSU bits clear). */
    if ((env->CP0_Status & (1 << CP0St_CU0)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    /* Availability of the COP1X (indexed/fused FP) instructions
       depends on which ISA level the CPU implements. */
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /* All supported MIPS IV CPUs use the XX (CU3) to enable
           and disable the MIPS IV extensions to the MIPS III ISA.
           Some other MIPS IV CPUs ignore the bit, so the check here
           would be too restrictive for them. */
        if (env->CP0_Status & (1 << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
}
84 /*****************************************************************************/
85 /* Exceptions processing helpers */
87 void helper_raise_exception_err (uint32_t exception, int error_code)
89 #if 1
90 if (exception < 0x100)
91 qemu_log("%s: %d %d\n", __func__, exception, error_code);
92 #endif
93 env->exception_index = exception;
94 env->error_code = error_code;
95 cpu_loop_exit(env);
/* Raise an exception that carries no error code. */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
103 #if !defined(CONFIG_USER_ONLY)
104 static void do_restore_state (void *pc_ptr)
106 TranslationBlock *tb;
107 unsigned long pc = (unsigned long) pc_ptr;
109 tb = tb_find_pc (pc);
110 if (tb) {
111 cpu_restore_state(tb, env, pc);
114 #endif
/* Generate do_<name>() load wrappers.  In user mode there is a single
   flat address space (*_raw accessors); in system mode the access is
   performed at the privilege level selected by mem_idx
   (0 = kernel, 1 = supervisor, 2/default = user). */
#if defined(CONFIG_USER_ONLY)
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    return (type) insn##_raw(addr);                                     \
}
#else
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: return (type) insn##_kernel(addr); break;                   \
    case 1: return (type) insn##_super(addr); break;                    \
    default:                                                            \
    case 2: return (type) insn##_user(addr); break;                     \
    }                                                                   \
}
#endif
HELPER_LD(lbu, ldub, uint8_t)
HELPER_LD(lw, ldl, int32_t)
#ifdef TARGET_MIPS64
HELPER_LD(ld, ldq, int64_t)
#endif
#undef HELPER_LD
/* Generate do_<name>() store wrappers, mirroring HELPER_LD above:
   user mode uses the flat *_raw access, system mode dispatches on
   mem_idx (0 = kernel, 1 = supervisor, 2/default = user). */
#if defined(CONFIG_USER_ONLY)
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    insn##_raw(addr, val);                                              \
}
#else
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: insn##_kernel(addr, val); break;                            \
    case 1: insn##_super(addr, val); break;                             \
    default:                                                            \
    case 2: insn##_user(addr, val); break;                              \
    }                                                                   \
}
#endif
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#ifdef TARGET_MIPS64
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
/* CLO/CLZ: count leading ones / zeros of the 32-bit operand, and the
   64-bit DCLO/DCLZ counterparts. */
target_ulong helper_clo (target_ulong arg1)
{
    return clo32(arg1);
}

target_ulong helper_clz (target_ulong arg1)
{
    return clz32(arg1);
}

#if defined(TARGET_MIPS64)
target_ulong helper_dclo (target_ulong arg1)
{
    return clo64(arg1);
}

target_ulong helper_dclz (target_ulong arg1)
{
    return clz64(arg1);
}
#endif /* TARGET_MIPS64 */
/* 64 bits arithmetic for 32 bits hosts */

/* Read the HI/LO pair of the active thread context as one 64-bit
   value (HI in the upper half, LO in the lower). */
static inline uint64_t get_HILO (void)
{
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
}

/* Split a 64-bit value back into HI/LO; each half is stored
   sign-extended (relevant when target_ulong is 64-bit). */
static inline void set_HILO (uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)HILO;
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
/* Store a 64-bit result into HI/LO and hand the (sign-extended) HI
 * word back to the caller through arg1.
 *
 * This must be a macro: the previous static inline function took arg1
 * by value, so its assignment to arg1 was dead and the vr54xx
 * multiply helpers below returned their unmodified input register
 * instead of the HI word of the product.  The HILO expression is
 * evaluated exactly once, before arg1 is written, because callers
 * reference arg1 inside it. */
#define set_HIT0_LO(arg1, HILO)                                       \
    do {                                                              \
        uint64_t hilo_ = (HILO);                                      \
        env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);         \
        (arg1) = env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);       \
    } while (0)
/* Store a 64-bit result into HI/LO and hand the (sign-extended) LO
 * word back to the caller through arg1.
 *
 * Macro for the same reason as set_HIT0_LO: as a by-value function
 * the assignment to arg1 was dead, so helpers such as helper_muls
 * returned their input instead of the LO word of the result.  HILO is
 * evaluated once into a temporary before arg1 is modified, since
 * callers use arg1 inside the HILO expression. */
#define set_HI_LOT0(arg1, HILO)                                       \
    do {                                                              \
        uint64_t hilo_ = (HILO);                                      \
        (arg1) = env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);\
        env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);                \
    } while (0)
/* Multiplication variants of the vr54xx. */
/* Each helper computes a 64-bit product (optionally accumulated into
   the current HI/LO), stores it in HI/LO, and returns one word of the
   result via arg1: set_HI_LOT0 selects the LO word, set_HIT0_LO the
   HI word.  NOTE(review): if set_HI_LOT0/set_HIT0_LO are plain
   by-value functions, their assignment to arg1 cannot reach this
   scope and these helpers return the unmodified input register --
   verify against the VR5432 instruction semantics. */
target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
{
    set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}

target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);

    return arg1;
}

target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);

    return arg1;
}

target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));

    return arg1;
}

target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
{
    set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));

    return arg1;
}
#ifdef TARGET_MIPS64
/* DMULT/DMULTU: full 128-bit (signed/unsigned) product of two 64-bit
   operands; low half goes to LO, high half to HI. */
void helper_dmult (target_ulong arg1, target_ulong arg2)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}

void helper_dmultu (target_ulong arg1, target_ulong arg2)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
#endif
325 #ifndef CONFIG_USER_ONLY
/* Translate a guest virtual address for a load (rw == 0) or store
   (rw != 0).  On a translation fault the MMU code has already set up
   the exception state, so we just exit to the main loop (cpu_loop_exit
   does not return); otherwise the physical address is returned. */
static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
{
    target_phys_addr_t lladdr;

    lladdr = cpu_mips_translate_address(env, address, rw);

    if (lladdr == -1LL) {
        cpu_loop_exit(env);
    } else {
        return lladdr;
    }
}
/* LL/LLD: load-linked.  Record the physical address and loaded value
   so a subsequent SC/SCD can verify the location is unchanged. */
#define HELPER_LD_ATOMIC(name, insn)                                  \
target_ulong helper_##name(target_ulong arg, int mem_idx)             \
{                                                                     \
    env->lladdr = do_translate_address(arg, 0);                       \
    env->llval = do_##insn(arg, mem_idx);                             \
    return env->llval;                                                \
}
HELPER_LD_ATOMIC(ll, lw)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld)
#endif
#undef HELPER_LD_ATOMIC
/* SC/SCD: store-conditional.  A misaligned address (almask bits set)
   raises AdES.  The store is performed and 1 returned only when the
   physical address matches the one recorded by LL/LLD and memory
   still holds the linked value; otherwise 0 is returned. */
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)              \
target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
{                                                                     \
    target_long tmp;                                                  \
                                                                      \
    if (arg2 & almask) {                                              \
        env->CP0_BadVAddr = arg2;                                     \
        helper_raise_exception(EXCP_AdES);                            \
    }                                                                 \
    if (do_translate_address(arg2, 1) == env->lladdr) {               \
        tmp = do_##ld_insn(arg2, mem_idx);                            \
        if (tmp == env->llval) {                                      \
            do_##st_insn(arg2, arg1, mem_idx);                        \
            return 1;                                                 \
        }                                                             \
    }                                                                 \
    return 0;                                                         \
}
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
#ifdef TARGET_MIPS64
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
#endif
#undef HELPER_ST_ATOMIC
376 #endif
/* Byte-lane helpers for the unaligned LWL/LWR/SWL/SWR family.
   GET_LMASK yields the addressed byte's offset within its word in
   big-endian terms; GET_OFFSET steps to the remaining bytes, in
   opposite directions depending on target endianness. */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
/* LWL: merge the high-order bytes of an unaligned word into arg1.
   arg1 holds the current register contents (uncovered bytes are
   preserved), arg2 the unaligned address; the result is sign-extended
   to target_ulong. */
target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);

    if (GET_LMASK(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00) | tmp;
    }
    return (int32_t)arg1;
}

/* LWR: merge the low-order bytes of an unaligned word into arg1. */
target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFF00) | tmp;

    if (GET_LMASK(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) == 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
    }
    return (int32_t)arg1;
}

/* SWL: store the high-order bytes of arg1 to an unaligned address. */
void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK(arg2) <= 2)
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK(arg2) <= 1)
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK(arg2) == 0)
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
}

/* SWR: store the low-order bytes of arg1 to an unaligned address. */
void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK(arg2) >= 1)
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK(arg2) >= 2)
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK(arg2) == 3)
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
}
#if defined(TARGET_MIPS64)
/* "half" load and stores. We must do the memory access inline,
   or fault handling won't work. */

/* Doubleword analogue of GET_LMASK: offset of the addressed byte
   within its 8-byte unit, in big-endian terms. */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif

/* LDL: merge the high-order bytes of an unaligned doubleword. */
target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);

    if (GET_LMASK64(arg2) <= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) <= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) <= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) <= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    }

    return arg1;
}

/* LDR: merge the low-order bytes of an unaligned doubleword. */
target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;

    if (GET_LMASK64(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) >= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) >= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) >= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) >= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) == 7) {
        tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    }

    return arg1;
}

/* SDL: store the high-order bytes of arg1 to an unaligned address. */
void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);

    if (GET_LMASK64(arg2) <= 6)
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) <= 5)
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) <= 4)
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) <= 3)
        do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) <= 2)
        do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) <= 1)
        do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);

    /* "<= 0" is equivalent to "== 0" here (GET_LMASK64 is in 0..7);
       kept as originally written. */
    if (GET_LMASK64(arg2) <= 0)
        do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
}

/* SDR: store the low-order bytes of arg1 to an unaligned address. */
void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK64(arg2) >= 1)
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK64(arg2) >= 2)
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) >= 3)
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) >= 4)
        do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) >= 5)
        do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) >= 6)
        do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) == 7)
        do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
}
#endif /* TARGET_MIPS64 */
/* Registers covered by the LWM/SWM/LDM/SDM register lists:
   s0-s7 (16-23) and fp/s8 (30). */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };

/* LWM (microMIPS): load up to 9 saved registers, and optionally ra,
   from consecutive words starting at addr.  reglist bits 3:0 give how
   many multiple_regs[] entries to load, bit 4 selects ra ($31). */
void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldl_raw
#else
    /* Select the accessor for the privilege level given by mem_idx. */
    uint32_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldl_kernel; break;
    case 1: ldfun = ldl_super; break;
    default:
    case 2: ldfun = ldl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = (target_long) ldfun(addr);
    }
}
/* SWM (microMIPS): store up to 9 saved registers, and optionally ra,
   to consecutive words starting at addr (see helper_lwm). */
void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stl_raw
#else
    /* Select the accessor for the privilege level given by mem_idx. */
    void (*stfun)(target_ulong, uint32_t);

    switch (mem_idx)
    {
    case 0: stfun = stl_kernel; break;
    case 1: stfun = stl_super; break;
    default:
    case 2: stfun = stl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 4;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
#if defined(TARGET_MIPS64)
/* LDM: 64-bit variant of LWM -- loads doublewords (8-byte stride). */
void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldq_raw
#else
    uint64_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldq_kernel; break;
    case 1: ldfun = ldq_super; break;
    default:
    case 2: ldfun = ldq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = ldfun(addr);
    }
}

/* SDM: 64-bit variant of SWM -- stores doublewords (8-byte stride). */
void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stq_raw
#else
    void (*stfun)(target_ulong, uint64_t);

    switch (mem_idx)
    {
    case 0: stfun = stq_kernel; break;
    case 1: stfun = stq_super; break;
    default:
    case 2: stfun = stq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 8;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
#endif
751 #ifndef CONFIG_USER_ONLY
752 /* CP0 helpers */
/* CP0 read helpers.  helper_mfc0_* read a register of the current
   thread context / VPE; the helper_mftc0_* variants (MT ASE) read the
   same register of the TC selected by VPEControl.TargTC, which may
   differ from the currently running TC. */
target_ulong helper_mfc0_mvpcontrol (void)
{
    return env->mvp->CP0_MVPControl;
}

target_ulong helper_mfc0_mvpconf0 (void)
{
    return env->mvp->CP0_MVPConf0;
}

target_ulong helper_mfc0_mvpconf1 (void)
{
    return env->mvp->CP0_MVPConf1;
}

/* Random: a pseudo-random TLB index (sign-extended to target width). */
target_ulong helper_mfc0_random (void)
{
    return (int32_t)cpu_mips_get_random(env);
}

target_ulong helper_mfc0_tcstatus (void)
{
    return env->active_tc.CP0_TCStatus;
}

target_ulong helper_mftc0_tcstatus(void)
{
    /* TargTC occupies the low bits of VPEControl, so masking suffices. */
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCStatus;
    else
        return env->tcs[other_tc].CP0_TCStatus;
}

target_ulong helper_mfc0_tcbind (void)
{
    return env->active_tc.CP0_TCBind;
}

target_ulong helper_mftc0_tcbind(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCBind;
    else
        return env->tcs[other_tc].CP0_TCBind;
}

/* TCRestart is modelled as the TC's PC. */
target_ulong helper_mfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_mftc0_tcrestart(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.PC;
    else
        return env->tcs[other_tc].PC;
}

target_ulong helper_mfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_mftc0_tchalt(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCHalt;
    else
        return env->tcs[other_tc].CP0_TCHalt;
}

target_ulong helper_mfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_mftc0_tccontext(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCContext;
    else
        return env->tcs[other_tc].CP0_TCContext;
}

target_ulong helper_mfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_mftc0_tcschedule(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCSchedule;
    else
        return env->tcs[other_tc].CP0_TCSchedule;
}

target_ulong helper_mfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_mftc0_tcschefback(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        return env->active_tc.CP0_TCScheFBack;
    else
        return env->tcs[other_tc].CP0_TCScheFBack;
}
/* Count: current value of the CP0 cycle counter, sign-extended. */
target_ulong helper_mfc0_count (void)
{
    return (int32_t)cpu_mips_get_count(env);
}

/* EntryHi of another TC: the VPE-wide part comes from CP0_EntryHi,
   the ASID (low 8 bits) from the target TC's TCStatus. */
target_ulong helper_mftc0_entryhi(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_TCStatus;
    else
        tcstatus = env->tcs[other_tc].CP0_TCStatus;

    return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
}

/* Status of another TC: combine the VPE-wide Status bits with the
   per-TC CU0-3, MX and KSU fields held in that TC's TCStatus. */
target_ulong helper_mftc0_status(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    target_ulong t0;
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_TCStatus;
    else
        tcstatus = env->tcs[other_tc].CP0_TCStatus;

    t0 = env->CP0_Status & ~0xf1000018;
    t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
    t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
    t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);

    return t0;
}

/* LLAddr: the link address of the last LL, shifted per CPU model. */
target_ulong helper_mfc0_lladdr (void)
{
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
}

target_ulong helper_mfc0_watchlo (uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}

target_ulong helper_mfc0_watchhi (uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}

/* Debug: reflect the live debug-mode flag in the DM bit. */
target_ulong helper_mfc0_debug (void)
{
    target_ulong t0 = env->CP0_Debug;
    if (env->hflags & MIPS_HFLAG_DM)
        t0 |= 1 << CP0DB_DM;

    return t0;
}

target_ulong helper_mftc0_debug(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    if (other_tc == env->current_tc)
        tcstatus = env->active_tc.CP0_Debug_tcstatus;
    else
        tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
           (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
#if defined(TARGET_MIPS64)
/* 64-bit (DMFC0) variants: same sources as the 32-bit readers above
   but without truncation/sign-extension to 32 bits. */
target_ulong helper_dmfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr (void)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_watchlo (uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
991 void helper_mtc0_index (target_ulong arg1)
993 int num = 1;
994 unsigned int tmp = env->tlb->nb_tlb;
996 do {
997 tmp >>= 1;
998 num <<= 1;
999 } while (tmp);
1000 env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
/* MVPControl: only bits permitted by VPEConf0.MVP (and STLB when the
   VPE configuration state is unlocked) are writable. */
void helper_mtc0_mvpcontrol (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0MVPCo_STLB);
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);

    // TODO: Enable/disable shared TLB, enable/disable VPEs.

    env->mvp->CP0_MVPControl = newval;
}

/* VPEControl: YSI/GSI/TE and the TargTC field are writable. */
void helper_mtc0_vpecontrol (target_ulong arg1)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* Yield scheduler intercept not implemented. */
    /* Gating storage scheduler intercept not implemented. */

    // TODO: Enable/disable TCs.

    env->CP0_VPEControl = newval;
}

/* VPEConf0: writable only when this VPE is the master (MVP set). */
void helper_mtc0_vpeconf0 (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
            mask |= (0xff << CP0VPEC0_XTC);
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    // TODO: TC exclusive handling due to ERL/EXL.

    env->CP0_VPEConf0 = newval;
}

/* VPEConf1: coprocessor allocation fields, writable only while the
   VPE configuration state is unlocked (MVPControl.VPC). */
void helper_mtc0_vpeconf1 (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    // TODO: Handle FPU (CP1) binding.

    env->CP0_VPEConf1 = newval;
}

void helper_mtc0_yqmask (target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

void helper_mtc0_vpeopt (target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}

void helper_mtc0_entrylo0 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
}
/* Per-TC register writes.  helper_mtc0_* operate on the running TC;
   the helper_mttc0_* variants (MTTR) operate on the TC selected by
   VPEControl.TargTC, which may or may not be the current one. */
void helper_mtc0_tcstatus (target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    // TODO: Sync with CP0_Status.

    env->active_tc.CP0_TCStatus = newval;
}

void helper_mttc0_tcstatus (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Sync with CP0_Status.

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCStatus = arg1;
    else
        env->tcs[other_tc].CP0_TCStatus = arg1;
}

/* TCBind: TBE is always writable; CurVPE only while the VPE
   configuration state is unlocked. */
void helper_mtc0_tcbind (target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}

void helper_mttc0_tcbind (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    if (other_tc == env->current_tc) {
        newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        env->active_tc.CP0_TCBind = newval;
    } else {
        newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        env->tcs[other_tc].CP0_TCBind = newval;
    }
}

/* TCRestart: sets the TC's PC, clears its dirty state and breaks any
   outstanding LL/SC link. */
void helper_mtc0_tcrestart (target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}

void helper_mttc0_tcrestart (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc) {
        env->active_tc.PC = arg1;
        env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        env->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    } else {
        env->tcs[other_tc].PC = arg1;
        env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        env->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    }
}

void helper_mtc0_tchalt (target_ulong arg1)
{
    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
}

void helper_mttc0_tchalt (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCHalt = arg1;
    else
        env->tcs[other_tc].CP0_TCHalt = arg1;
}

void helper_mtc0_tccontext (target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

void helper_mttc0_tccontext (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCContext = arg1;
    else
        env->tcs[other_tc].CP0_TCContext = arg1;
}

void helper_mtc0_tcschedule (target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

void helper_mttc0_tcschedule (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCSchedule = arg1;
    else
        env->tcs[other_tc].CP0_TCSchedule = arg1;
}

void helper_mtc0_tcschefback (target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

void helper_mttc0_tcschefback (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);

    if (other_tc == env->current_tc)
        env->active_tc.CP0_TCScheFBack = arg1;
    else
        env->tcs[other_tc].CP0_TCScheFBack = arg1;
}
void helper_mtc0_entrylo1 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
}

/* Context: only the PTEBase part (above bit 22) is writable; the
   BadVPN2 field is maintained by the TLB refill machinery. */
void helper_mtc0_context (target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

void helper_mtc0_pagemask (target_ulong arg1)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}

void helper_mtc0_pagegrain (target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}

/* Wired: the value is reduced modulo the number of TLB entries. */
void helper_mtc0_wired (target_ulong arg1)
{
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
}

/* SRSConf0-4: shadow register set configuration; bits can only be
   set (|=), and only where the per-CPU rw bitmask allows. */
void helper_mtc0_srsconf0 (target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1 (target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2 (target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3 (target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4 (target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

void helper_mtc0_hwrena (target_ulong arg1)
{
    env->CP0_HWREna = arg1 & 0x0000000F;
}

void helper_mtc0_count (target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}

/* EntryHi: store VPN2 and ASID; mirror the ASID into TCStatus when
   the MT ASE is present, and flush QEMU's TLB on an ASID change
   because cached translations are tagged with the old ASID. */
void helper_mtc0_entryhi (target_ulong arg1)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
        env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
    }
    /* If the ASID changes, flush qemu's TLB. */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}

/* MTTR form: VPE-wide part goes to CP0_EntryHi, the ASID to the
   selected TC's TCStatus. */
void helper_mttc0_entryhi(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;

    env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (arg1 & ~0xff);
    if (other_tc == env->current_tc) {
        tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (arg1 & 0xff);
        env->active_tc.CP0_TCStatus = tcstatus;
    } else {
        tcstatus = (env->tcs[other_tc].CP0_TCStatus & ~0xff) | (arg1 & 0xff);
        env->tcs[other_tc].CP0_TCStatus = tcstatus;
    }
}

void helper_mtc0_compare (target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}
1337 void helper_mtc0_status (target_ulong arg1)
1339 uint32_t val, old;
1340 uint32_t mask = env->CP0_Status_rw_bitmask;
1342 val = arg1 & mask;
1343 old = env->CP0_Status;
1344 env->CP0_Status = (env->CP0_Status & ~mask) | val;
1345 compute_hflags(env);
1346 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1347 qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1348 old, old & env->CP0_Cause & CP0Ca_IP_mask,
1349 val, val & env->CP0_Cause & CP0Ca_IP_mask,
1350 env->CP0_Cause);
1351 switch (env->hflags & MIPS_HFLAG_KSU) {
1352 case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1353 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1354 case MIPS_HFLAG_KM: qemu_log("\n"); break;
1355 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1360 void helper_mttc0_status(target_ulong arg1)
1362 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1363 int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;
1365 env->CP0_Status = arg1 & ~0xf1000018;
1366 tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (arg1 & (0xf << CP0St_CU0));
1367 tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((arg1 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
1368 tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((arg1 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
1369 if (other_tc == env->current_tc)
1370 env->active_tc.CP0_TCStatus = tcstatus;
1371 else
1372 env->tcs[other_tc].CP0_TCStatus = tcstatus;
1375 void helper_mtc0_intctl (target_ulong arg1)
1377 /* vectored interrupts not implemented, no performance counters. */
1378 env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
1381 void helper_mtc0_srsctl (target_ulong arg1)
1383 uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1384 env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1387 void helper_mtc0_cause (target_ulong arg1)
1389 uint32_t mask = 0x00C00300;
1390 uint32_t old = env->CP0_Cause;
1391 int i;
1393 if (env->insn_flags & ISA_MIPS32R2)
1394 mask |= 1 << CP0Ca_DC;
1396 env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);
1398 if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
1399 if (env->CP0_Cause & (1 << CP0Ca_DC))
1400 cpu_mips_stop_count(env);
1401 else
1402 cpu_mips_start_count(env);
1405 /* Set/reset software interrupts */
1406 for (i = 0 ; i < 2 ; i++) {
1407 if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
1408 cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
1413 void helper_mtc0_ebase (target_ulong arg1)
1415 /* vectored interrupts not implemented */
1416 env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1419 void helper_mtc0_config0 (target_ulong arg1)
1421 env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1424 void helper_mtc0_config2 (target_ulong arg1)
1426 /* tertiary/secondary caches not implemented */
1427 env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1430 void helper_mtc0_lladdr (target_ulong arg1)
1432 target_long mask = env->CP0_LLAddr_rw_bitmask;
1433 arg1 = arg1 << env->CP0_LLAddr_shift;
1434 env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1437 void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
1439 /* Watch exceptions for instructions, data loads, data stores
1440 not implemented. */
1441 env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1444 void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
1446 env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1447 env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1450 void helper_mtc0_xcontext (target_ulong arg1)
1452 target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1453 env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1456 void helper_mtc0_framemask (target_ulong arg1)
1458 env->CP0_Framemask = arg1; /* XXX */
1461 void helper_mtc0_debug (target_ulong arg1)
1463 env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1464 if (arg1 & (1 << CP0DB_DM))
1465 env->hflags |= MIPS_HFLAG_DM;
1466 else
1467 env->hflags &= ~MIPS_HFLAG_DM;
1470 void helper_mttc0_debug(target_ulong arg1)
1472 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1473 uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1475 /* XXX: Might be wrong, check with EJTAG spec. */
1476 if (other_tc == env->current_tc)
1477 env->active_tc.CP0_Debug_tcstatus = val;
1478 else
1479 env->tcs[other_tc].CP0_Debug_tcstatus = val;
1480 env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1481 (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1484 void helper_mtc0_performance0 (target_ulong arg1)
1486 env->CP0_Performance0 = arg1 & 0x000007ff;
1489 void helper_mtc0_taglo (target_ulong arg1)
1491 env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1494 void helper_mtc0_datalo (target_ulong arg1)
1496 env->CP0_DataLo = arg1; /* XXX */
1499 void helper_mtc0_taghi (target_ulong arg1)
1501 env->CP0_TagHi = arg1; /* XXX */
1504 void helper_mtc0_datahi (target_ulong arg1)
1506 env->CP0_DataHi = arg1; /* XXX */
1509 /* MIPS MT functions */
1510 target_ulong helper_mftgpr(uint32_t sel)
1512 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1514 if (other_tc == env->current_tc)
1515 return env->active_tc.gpr[sel];
1516 else
1517 return env->tcs[other_tc].gpr[sel];
1520 target_ulong helper_mftlo(uint32_t sel)
1522 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1524 if (other_tc == env->current_tc)
1525 return env->active_tc.LO[sel];
1526 else
1527 return env->tcs[other_tc].LO[sel];
1530 target_ulong helper_mfthi(uint32_t sel)
1532 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1534 if (other_tc == env->current_tc)
1535 return env->active_tc.HI[sel];
1536 else
1537 return env->tcs[other_tc].HI[sel];
1540 target_ulong helper_mftacx(uint32_t sel)
1542 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1544 if (other_tc == env->current_tc)
1545 return env->active_tc.ACX[sel];
1546 else
1547 return env->tcs[other_tc].ACX[sel];
1550 target_ulong helper_mftdsp(void)
1552 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1554 if (other_tc == env->current_tc)
1555 return env->active_tc.DSPControl;
1556 else
1557 return env->tcs[other_tc].DSPControl;
1560 void helper_mttgpr(target_ulong arg1, uint32_t sel)
1562 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1564 if (other_tc == env->current_tc)
1565 env->active_tc.gpr[sel] = arg1;
1566 else
1567 env->tcs[other_tc].gpr[sel] = arg1;
1570 void helper_mttlo(target_ulong arg1, uint32_t sel)
1572 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1574 if (other_tc == env->current_tc)
1575 env->active_tc.LO[sel] = arg1;
1576 else
1577 env->tcs[other_tc].LO[sel] = arg1;
1580 void helper_mtthi(target_ulong arg1, uint32_t sel)
1582 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1584 if (other_tc == env->current_tc)
1585 env->active_tc.HI[sel] = arg1;
1586 else
1587 env->tcs[other_tc].HI[sel] = arg1;
1590 void helper_mttacx(target_ulong arg1, uint32_t sel)
1592 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1594 if (other_tc == env->current_tc)
1595 env->active_tc.ACX[sel] = arg1;
1596 else
1597 env->tcs[other_tc].ACX[sel] = arg1;
1600 void helper_mttdsp(target_ulong arg1)
1602 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1604 if (other_tc == env->current_tc)
1605 env->active_tc.DSPControl = arg1;
1606 else
1607 env->tcs[other_tc].DSPControl = arg1;
1610 /* MIPS MT functions */
1611 target_ulong helper_dmt(void)
1613 // TODO
1614 return 0;
1617 target_ulong helper_emt(void)
1619 // TODO
1620 return 0;
1623 target_ulong helper_dvpe(void)
1625 // TODO
1626 return 0;
1629 target_ulong helper_evpe(void)
1631 // TODO
1632 return 0;
1634 #endif /* !CONFIG_USER_ONLY */
1636 void helper_fork(target_ulong arg1, target_ulong arg2)
1638 // arg1 = rt, arg2 = rs
1639 arg1 = 0;
1640 // TODO: store to TC register
1643 target_ulong helper_yield(target_ulong arg)
1645 target_long arg1 = arg;
1647 if (arg1 < 0) {
1648 /* No scheduling policy implemented. */
1649 if (arg1 != -2) {
1650 if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1651 env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1652 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1653 env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1654 helper_raise_exception(EXCP_THREAD);
1657 } else if (arg1 == 0) {
1658 if (0 /* TODO: TC underflow */) {
1659 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1660 helper_raise_exception(EXCP_THREAD);
1661 } else {
1662 // TODO: Deallocate TC
1664 } else if (arg1 > 0) {
1665 /* Yield qualifier inputs not implemented. */
1666 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1667 env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1668 helper_raise_exception(EXCP_THREAD);
1670 return env->CP0_YQMask;
1673 #ifndef CONFIG_USER_ONLY
1674 /* TLB management */
1675 static void cpu_mips_tlb_flush (CPUState *env, int flush_global)
1677 /* Flush qemu's TLB and discard all shadowed entries. */
1678 tlb_flush (env, flush_global);
1679 env->tlb->tlb_in_use = env->tlb->nb_tlb;
1682 static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
1684 /* Discard entries from env->tlb[first] onwards. */
1685 while (env->tlb->tlb_in_use > first) {
1686 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1690 static void r4k_fill_tlb (int idx)
1692 r4k_tlb_t *tlb;
1694 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1695 tlb = &env->tlb->mmu.r4k.tlb[idx];
1696 tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1697 #if defined(TARGET_MIPS64)
1698 tlb->VPN &= env->SEGMask;
1699 #endif
1700 tlb->ASID = env->CP0_EntryHi & 0xFF;
1701 tlb->PageMask = env->CP0_PageMask;
1702 tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1703 tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1704 tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1705 tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1706 tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1707 tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1708 tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1709 tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1710 tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1713 void r4k_helper_tlbwi (void)
1715 int idx;
1717 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1719 /* Discard cached TLB entries. We could avoid doing this if the
1720 tlbwi is just upgrading access permissions on the current entry;
1721 that might be a further win. */
1722 r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
1724 r4k_invalidate_tlb(env, idx, 0);
1725 r4k_fill_tlb(idx);
1728 void r4k_helper_tlbwr (void)
1730 int r = cpu_mips_get_random(env);
1732 r4k_invalidate_tlb(env, r, 1);
1733 r4k_fill_tlb(r);
1736 void r4k_helper_tlbp (void)
1738 r4k_tlb_t *tlb;
1739 target_ulong mask;
1740 target_ulong tag;
1741 target_ulong VPN;
1742 uint8_t ASID;
1743 int i;
1745 ASID = env->CP0_EntryHi & 0xFF;
1746 for (i = 0; i < env->tlb->nb_tlb; i++) {
1747 tlb = &env->tlb->mmu.r4k.tlb[i];
1748 /* 1k pages are not supported. */
1749 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1750 tag = env->CP0_EntryHi & ~mask;
1751 VPN = tlb->VPN & ~mask;
1752 /* Check ASID, virtual page number & size */
1753 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1754 /* TLB match */
1755 env->CP0_Index = i;
1756 break;
1759 if (i == env->tlb->nb_tlb) {
1760 /* No match. Discard any shadow entries, if any of them match. */
1761 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1762 tlb = &env->tlb->mmu.r4k.tlb[i];
1763 /* 1k pages are not supported. */
1764 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1765 tag = env->CP0_EntryHi & ~mask;
1766 VPN = tlb->VPN & ~mask;
1767 /* Check ASID, virtual page number & size */
1768 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1769 r4k_mips_tlb_flush_extra (env, i);
1770 break;
1774 env->CP0_Index |= 0x80000000;
1778 void r4k_helper_tlbr (void)
1780 r4k_tlb_t *tlb;
1781 uint8_t ASID;
1782 int idx;
1784 ASID = env->CP0_EntryHi & 0xFF;
1785 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1786 tlb = &env->tlb->mmu.r4k.tlb[idx];
1788 /* If this will change the current ASID, flush qemu's TLB. */
1789 if (ASID != tlb->ASID)
1790 cpu_mips_tlb_flush (env, 1);
1792 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
1794 env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1795 env->CP0_PageMask = tlb->PageMask;
1796 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1797 (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1798 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1799 (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
1802 void helper_tlbwi(void)
1804 env->tlb->helper_tlbwi();
1807 void helper_tlbwr(void)
1809 env->tlb->helper_tlbwr();
1812 void helper_tlbp(void)
1814 env->tlb->helper_tlbp();
1817 void helper_tlbr(void)
1819 env->tlb->helper_tlbr();
1822 /* Specials */
1823 target_ulong helper_di (void)
1825 target_ulong t0 = env->CP0_Status;
1827 env->CP0_Status = t0 & ~(1 << CP0St_IE);
1828 return t0;
1831 target_ulong helper_ei (void)
1833 target_ulong t0 = env->CP0_Status;
1835 env->CP0_Status = t0 | (1 << CP0St_IE);
1836 return t0;
1839 static void debug_pre_eret (void)
1841 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1842 qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1843 env->active_tc.PC, env->CP0_EPC);
1844 if (env->CP0_Status & (1 << CP0St_ERL))
1845 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1846 if (env->hflags & MIPS_HFLAG_DM)
1847 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1848 qemu_log("\n");
1852 static void debug_post_eret (void)
1854 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1855 qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1856 env->active_tc.PC, env->CP0_EPC);
1857 if (env->CP0_Status & (1 << CP0St_ERL))
1858 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1859 if (env->hflags & MIPS_HFLAG_DM)
1860 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1861 switch (env->hflags & MIPS_HFLAG_KSU) {
1862 case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1863 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1864 case MIPS_HFLAG_KM: qemu_log("\n"); break;
1865 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1870 static void set_pc (target_ulong error_pc)
1872 env->active_tc.PC = error_pc & ~(target_ulong)1;
1873 if (error_pc & 1) {
1874 env->hflags |= MIPS_HFLAG_M16;
1875 } else {
1876 env->hflags &= ~(MIPS_HFLAG_M16);
1880 void helper_eret (void)
1882 debug_pre_eret();
1883 if (env->CP0_Status & (1 << CP0St_ERL)) {
1884 set_pc(env->CP0_ErrorEPC);
1885 env->CP0_Status &= ~(1 << CP0St_ERL);
1886 } else {
1887 set_pc(env->CP0_EPC);
1888 env->CP0_Status &= ~(1 << CP0St_EXL);
1890 compute_hflags(env);
1891 debug_post_eret();
1892 env->lladdr = 1;
1895 void helper_deret (void)
1897 debug_pre_eret();
1898 set_pc(env->CP0_DEPC);
1900 env->hflags &= MIPS_HFLAG_DM;
1901 compute_hflags(env);
1902 debug_post_eret();
1903 env->lladdr = 1;
1905 #endif /* !CONFIG_USER_ONLY */
1907 target_ulong helper_rdhwr_cpunum(void)
1909 if ((env->hflags & MIPS_HFLAG_CP0) ||
1910 (env->CP0_HWREna & (1 << 0)))
1911 return env->CP0_EBase & 0x3ff;
1912 else
1913 helper_raise_exception(EXCP_RI);
1915 return 0;
1918 target_ulong helper_rdhwr_synci_step(void)
1920 if ((env->hflags & MIPS_HFLAG_CP0) ||
1921 (env->CP0_HWREna & (1 << 1)))
1922 return env->SYNCI_Step;
1923 else
1924 helper_raise_exception(EXCP_RI);
1926 return 0;
1929 target_ulong helper_rdhwr_cc(void)
1931 if ((env->hflags & MIPS_HFLAG_CP0) ||
1932 (env->CP0_HWREna & (1 << 2)))
1933 return env->CP0_Count;
1934 else
1935 helper_raise_exception(EXCP_RI);
1937 return 0;
1940 target_ulong helper_rdhwr_ccres(void)
1942 if ((env->hflags & MIPS_HFLAG_CP0) ||
1943 (env->CP0_HWREna & (1 << 3)))
1944 return env->CCRes;
1945 else
1946 helper_raise_exception(EXCP_RI);
1948 return 0;
1951 void helper_pmon (int function)
1953 function /= 2;
1954 switch (function) {
1955 case 2: /* TODO: char inbyte(int waitflag); */
1956 if (env->active_tc.gpr[4] == 0)
1957 env->active_tc.gpr[2] = -1;
1958 /* Fall through */
1959 case 11: /* TODO: char inbyte (void); */
1960 env->active_tc.gpr[2] = -1;
1961 break;
1962 case 3:
1963 case 12:
1964 printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
1965 break;
1966 case 17:
1967 break;
1968 case 158:
1970 unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
1971 printf("%s", fmt);
1973 break;
1977 void helper_wait (void)
1979 env->halted = 1;
1980 helper_raise_exception(EXCP_HLT);
1983 #if !defined(CONFIG_USER_ONLY)
1985 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1987 #define MMUSUFFIX _mmu
1988 #define ALIGNED_ONLY
1990 #define SHIFT 0
1991 #include "softmmu_template.h"
1993 #define SHIFT 1
1994 #include "softmmu_template.h"
1996 #define SHIFT 2
1997 #include "softmmu_template.h"
1999 #define SHIFT 3
2000 #include "softmmu_template.h"
2002 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
2004 env->CP0_BadVAddr = addr;
2005 do_restore_state (retaddr);
2006 helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
2009 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2011 TranslationBlock *tb;
2012 CPUState *saved_env;
2013 unsigned long pc;
2014 int ret;
2016 /* XXX: hack to restore env in all cases, even if not called from
2017 generated code */
2018 saved_env = env;
2019 env = cpu_single_env;
2020 ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
2021 if (ret) {
2022 if (retaddr) {
2023 /* now we have a real cpu fault */
2024 pc = (unsigned long)retaddr;
2025 tb = tb_find_pc(pc);
2026 if (tb) {
2027 /* the PC is inside the translated code. It means that we have
2028 a virtual CPU fault */
2029 cpu_restore_state(tb, env, pc);
2032 helper_raise_exception_err(env->exception_index, env->error_code);
2034 env = saved_env;
2037 void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
2038 int is_write, int is_exec, int unused, int size)
2040 env = env1;
2042 if (is_exec)
2043 helper_raise_exception(EXCP_IBE);
2044 else
2045 helper_raise_exception(EXCP_DBE);
2047 #endif /* !CONFIG_USER_ONLY */
2049 /* Complex FPU operations which may need stack space. */
2051 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
2052 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2053 #define FLOAT_TWO32 make_float32(1 << 30)
2054 #define FLOAT_TWO64 make_float64(1ULL << 62)
2055 #define FLOAT_QNAN32 0x7fbfffff
2056 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2057 #define FLOAT_SNAN32 0x7fffffff
2058 #define FLOAT_SNAN64 0x7fffffffffffffffULL
2060 /* convert MIPS rounding mode in FCR31 to IEEE library */
2061 static unsigned int ieee_rm[] = {
2062 float_round_nearest_even,
2063 float_round_to_zero,
2064 float_round_up,
2065 float_round_down
2068 #define RESTORE_ROUNDING_MODE \
2069 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2071 #define RESTORE_FLUSH_MODE \
2072 set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2074 target_ulong helper_cfc1 (uint32_t reg)
2076 target_ulong arg1;
2078 switch (reg) {
2079 case 0:
2080 arg1 = (int32_t)env->active_fpu.fcr0;
2081 break;
2082 case 25:
2083 arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2084 break;
2085 case 26:
2086 arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2087 break;
2088 case 28:
2089 arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2090 break;
2091 default:
2092 arg1 = (int32_t)env->active_fpu.fcr31;
2093 break;
2096 return arg1;
2099 void helper_ctc1 (target_ulong arg1, uint32_t reg)
2101 switch(reg) {
2102 case 25:
2103 if (arg1 & 0xffffff00)
2104 return;
2105 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
2106 ((arg1 & 0x1) << 23);
2107 break;
2108 case 26:
2109 if (arg1 & 0x007c0000)
2110 return;
2111 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
2112 break;
2113 case 28:
2114 if (arg1 & 0x007c0000)
2115 return;
2116 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
2117 ((arg1 & 0x4) << 22);
2118 break;
2119 case 31:
2120 if (arg1 & 0x007c0000)
2121 return;
2122 env->active_fpu.fcr31 = arg1;
2123 break;
2124 default:
2125 return;
2127 /* set rounding mode */
2128 RESTORE_ROUNDING_MODE;
2129 /* set flush-to-zero mode */
2130 RESTORE_FLUSH_MODE;
2131 set_float_exception_flags(0, &env->active_fpu.fp_status);
2132 if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
2133 helper_raise_exception(EXCP_FPE);
2136 static inline int ieee_ex_to_mips(int xcpt)
2138 int ret = 0;
2139 if (xcpt) {
2140 if (xcpt & float_flag_invalid) {
2141 ret |= FP_INVALID;
2143 if (xcpt & float_flag_overflow) {
2144 ret |= FP_OVERFLOW;
2146 if (xcpt & float_flag_underflow) {
2147 ret |= FP_UNDERFLOW;
2149 if (xcpt & float_flag_divbyzero) {
2150 ret |= FP_DIV0;
2152 if (xcpt & float_flag_inexact) {
2153 ret |= FP_INEXACT;
2156 return ret;
2159 static inline void update_fcr31(void)
2161 int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
2163 SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2164 if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
2165 helper_raise_exception(EXCP_FPE);
2166 else
2167 UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
2170 /* Float support.
2171 Single precition routines have a "s" suffix, double precision a
2172 "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2173 paired single lower "pl", paired single upper "pu". */
2175 /* unary operations, modifying fp status */
2176 uint64_t helper_float_sqrt_d(uint64_t fdt0)
2178 return float64_sqrt(fdt0, &env->active_fpu.fp_status);
2181 uint32_t helper_float_sqrt_s(uint32_t fst0)
2183 return float32_sqrt(fst0, &env->active_fpu.fp_status);
2186 uint64_t helper_float_cvtd_s(uint32_t fst0)
2188 uint64_t fdt2;
2190 set_float_exception_flags(0, &env->active_fpu.fp_status);
2191 fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2192 update_fcr31();
2193 return fdt2;
2196 uint64_t helper_float_cvtd_w(uint32_t wt0)
2198 uint64_t fdt2;
2200 set_float_exception_flags(0, &env->active_fpu.fp_status);
2201 fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
2202 update_fcr31();
2203 return fdt2;
2206 uint64_t helper_float_cvtd_l(uint64_t dt0)
2208 uint64_t fdt2;
2210 set_float_exception_flags(0, &env->active_fpu.fp_status);
2211 fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
2212 update_fcr31();
2213 return fdt2;
2216 uint64_t helper_float_cvtl_d(uint64_t fdt0)
2218 uint64_t dt2;
2220 set_float_exception_flags(0, &env->active_fpu.fp_status);
2221 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2222 update_fcr31();
2223 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2224 dt2 = FLOAT_SNAN64;
2225 return dt2;
2228 uint64_t helper_float_cvtl_s(uint32_t fst0)
2230 uint64_t dt2;
2232 set_float_exception_flags(0, &env->active_fpu.fp_status);
2233 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2234 update_fcr31();
2235 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2236 dt2 = FLOAT_SNAN64;
2237 return dt2;
2240 uint64_t helper_float_cvtps_pw(uint64_t dt0)
2242 uint32_t fst2;
2243 uint32_t fsth2;
2245 set_float_exception_flags(0, &env->active_fpu.fp_status);
2246 fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2247 fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2248 update_fcr31();
2249 return ((uint64_t)fsth2 << 32) | fst2;
2252 uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
2254 uint32_t wt2;
2255 uint32_t wth2;
2257 set_float_exception_flags(0, &env->active_fpu.fp_status);
2258 wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2259 wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2260 update_fcr31();
2261 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
2262 wt2 = FLOAT_SNAN32;
2263 wth2 = FLOAT_SNAN32;
2265 return ((uint64_t)wth2 << 32) | wt2;
2268 uint32_t helper_float_cvts_d(uint64_t fdt0)
2270 uint32_t fst2;
2272 set_float_exception_flags(0, &env->active_fpu.fp_status);
2273 fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2274 update_fcr31();
2275 return fst2;
2278 uint32_t helper_float_cvts_w(uint32_t wt0)
2280 uint32_t fst2;
2282 set_float_exception_flags(0, &env->active_fpu.fp_status);
2283 fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2284 update_fcr31();
2285 return fst2;
2288 uint32_t helper_float_cvts_l(uint64_t dt0)
2290 uint32_t fst2;
2292 set_float_exception_flags(0, &env->active_fpu.fp_status);
2293 fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2294 update_fcr31();
2295 return fst2;
2298 uint32_t helper_float_cvts_pl(uint32_t wt0)
2300 uint32_t wt2;
2302 set_float_exception_flags(0, &env->active_fpu.fp_status);
2303 wt2 = wt0;
2304 update_fcr31();
2305 return wt2;
2308 uint32_t helper_float_cvts_pu(uint32_t wth0)
2310 uint32_t wt2;
2312 set_float_exception_flags(0, &env->active_fpu.fp_status);
2313 wt2 = wth0;
2314 update_fcr31();
2315 return wt2;
2318 uint32_t helper_float_cvtw_s(uint32_t fst0)
2320 uint32_t wt2;
2322 set_float_exception_flags(0, &env->active_fpu.fp_status);
2323 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2324 update_fcr31();
2325 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2326 wt2 = FLOAT_SNAN32;
2327 return wt2;
2330 uint32_t helper_float_cvtw_d(uint64_t fdt0)
2332 uint32_t wt2;
2334 set_float_exception_flags(0, &env->active_fpu.fp_status);
2335 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2336 update_fcr31();
2337 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2338 wt2 = FLOAT_SNAN32;
2339 return wt2;
2342 uint64_t helper_float_roundl_d(uint64_t fdt0)
2344 uint64_t dt2;
2346 set_float_exception_flags(0, &env->active_fpu.fp_status);
2347 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2348 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2349 RESTORE_ROUNDING_MODE;
2350 update_fcr31();
2351 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2352 dt2 = FLOAT_SNAN64;
2353 return dt2;
2356 uint64_t helper_float_roundl_s(uint32_t fst0)
2358 uint64_t dt2;
2360 set_float_exception_flags(0, &env->active_fpu.fp_status);
2361 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2362 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2363 RESTORE_ROUNDING_MODE;
2364 update_fcr31();
2365 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2366 dt2 = FLOAT_SNAN64;
2367 return dt2;
2370 uint32_t helper_float_roundw_d(uint64_t fdt0)
2372 uint32_t wt2;
2374 set_float_exception_flags(0, &env->active_fpu.fp_status);
2375 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2376 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2377 RESTORE_ROUNDING_MODE;
2378 update_fcr31();
2379 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2380 wt2 = FLOAT_SNAN32;
2381 return wt2;
2384 uint32_t helper_float_roundw_s(uint32_t fst0)
2386 uint32_t wt2;
2388 set_float_exception_flags(0, &env->active_fpu.fp_status);
2389 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2390 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2391 RESTORE_ROUNDING_MODE;
2392 update_fcr31();
2393 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2394 wt2 = FLOAT_SNAN32;
2395 return wt2;
2398 uint64_t helper_float_truncl_d(uint64_t fdt0)
2400 uint64_t dt2;
2402 set_float_exception_flags(0, &env->active_fpu.fp_status);
2403 dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2404 update_fcr31();
2405 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2406 dt2 = FLOAT_SNAN64;
2407 return dt2;
2410 uint64_t helper_float_truncl_s(uint32_t fst0)
2412 uint64_t dt2;
2414 set_float_exception_flags(0, &env->active_fpu.fp_status);
2415 dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2416 update_fcr31();
2417 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2418 dt2 = FLOAT_SNAN64;
2419 return dt2;
2422 uint32_t helper_float_truncw_d(uint64_t fdt0)
2424 uint32_t wt2;
2426 set_float_exception_flags(0, &env->active_fpu.fp_status);
2427 wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2428 update_fcr31();
2429 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2430 wt2 = FLOAT_SNAN32;
2431 return wt2;
2434 uint32_t helper_float_truncw_s(uint32_t fst0)
2436 uint32_t wt2;
2438 set_float_exception_flags(0, &env->active_fpu.fp_status);
2439 wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2440 update_fcr31();
2441 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2442 wt2 = FLOAT_SNAN32;
2443 return wt2;
2446 uint64_t helper_float_ceill_d(uint64_t fdt0)
2448 uint64_t dt2;
2450 set_float_exception_flags(0, &env->active_fpu.fp_status);
2451 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2452 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2453 RESTORE_ROUNDING_MODE;
2454 update_fcr31();
2455 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2456 dt2 = FLOAT_SNAN64;
2457 return dt2;
2460 uint64_t helper_float_ceill_s(uint32_t fst0)
2462 uint64_t dt2;
2464 set_float_exception_flags(0, &env->active_fpu.fp_status);
2465 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2466 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2467 RESTORE_ROUNDING_MODE;
2468 update_fcr31();
2469 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2470 dt2 = FLOAT_SNAN64;
2471 return dt2;
2474 uint32_t helper_float_ceilw_d(uint64_t fdt0)
2476 uint32_t wt2;
2478 set_float_exception_flags(0, &env->active_fpu.fp_status);
2479 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2480 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2481 RESTORE_ROUNDING_MODE;
2482 update_fcr31();
2483 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2484 wt2 = FLOAT_SNAN32;
2485 return wt2;
2488 uint32_t helper_float_ceilw_s(uint32_t fst0)
2490 uint32_t wt2;
2492 set_float_exception_flags(0, &env->active_fpu.fp_status);
2493 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2494 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2495 RESTORE_ROUNDING_MODE;
2496 update_fcr31();
2497 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2498 wt2 = FLOAT_SNAN32;
2499 return wt2;
2502 uint64_t helper_float_floorl_d(uint64_t fdt0)
2504 uint64_t dt2;
2506 set_float_exception_flags(0, &env->active_fpu.fp_status);
2507 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2508 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2509 RESTORE_ROUNDING_MODE;
2510 update_fcr31();
2511 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2512 dt2 = FLOAT_SNAN64;
2513 return dt2;
2516 uint64_t helper_float_floorl_s(uint32_t fst0)
2518 uint64_t dt2;
2520 set_float_exception_flags(0, &env->active_fpu.fp_status);
2521 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2522 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2523 RESTORE_ROUNDING_MODE;
2524 update_fcr31();
2525 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2526 dt2 = FLOAT_SNAN64;
2527 return dt2;
2530 uint32_t helper_float_floorw_d(uint64_t fdt0)
2532 uint32_t wt2;
2534 set_float_exception_flags(0, &env->active_fpu.fp_status);
2535 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2536 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2537 RESTORE_ROUNDING_MODE;
2538 update_fcr31();
2539 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2540 wt2 = FLOAT_SNAN32;
2541 return wt2;
2544 uint32_t helper_float_floorw_s(uint32_t fst0)
2546 uint32_t wt2;
2548 set_float_exception_flags(0, &env->active_fpu.fp_status);
2549 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2550 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2551 RESTORE_ROUNDING_MODE;
2552 update_fcr31();
2553 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2554 wt2 = FLOAT_SNAN32;
2555 return wt2;
/* unary operations, not modifying fp status */
/*
 * FLOAT_UNOP(name) expands to three helpers — _d (float64), _s (float32)
 * and _ps (paired single: low word in bits 31:0, high word in bits 63:32) —
 * each applying float64_/float32_<name> directly.  These operate on the sign
 * bit only (abs, chs), so no softfloat status pointer is passed and fcr31 is
 * not updated.
 */
#define FLOAT_UNOP(name)                                   \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)        \
{                                                          \
    return float64_ ## name(fdt0);                         \
}                                                          \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)        \
{                                                          \
    return float32_ ## name(fst0);                         \
}                                                          \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)       \
{                                                          \
    uint32_t wt0;                                          \
    uint32_t wth0;                                         \
                                                           \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);             \
    wth0 = float32_ ## name(fdt0 >> 32);                   \
    return ((uint64_t)wth0 << 32) | wt0;                   \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
2581 /* MIPS specific unary operations */
2582 uint64_t helper_float_recip_d(uint64_t fdt0)
2584 uint64_t fdt2;
2586 set_float_exception_flags(0, &env->active_fpu.fp_status);
2587 fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2588 update_fcr31();
2589 return fdt2;
2592 uint32_t helper_float_recip_s(uint32_t fst0)
2594 uint32_t fst2;
2596 set_float_exception_flags(0, &env->active_fpu.fp_status);
2597 fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2598 update_fcr31();
2599 return fst2;
2602 uint64_t helper_float_rsqrt_d(uint64_t fdt0)
2604 uint64_t fdt2;
2606 set_float_exception_flags(0, &env->active_fpu.fp_status);
2607 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2608 fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2609 update_fcr31();
2610 return fdt2;
2613 uint32_t helper_float_rsqrt_s(uint32_t fst0)
2615 uint32_t fst2;
2617 set_float_exception_flags(0, &env->active_fpu.fp_status);
2618 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2619 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2620 update_fcr31();
2621 return fst2;
2624 uint64_t helper_float_recip1_d(uint64_t fdt0)
2626 uint64_t fdt2;
2628 set_float_exception_flags(0, &env->active_fpu.fp_status);
2629 fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2630 update_fcr31();
2631 return fdt2;
2634 uint32_t helper_float_recip1_s(uint32_t fst0)
2636 uint32_t fst2;
2638 set_float_exception_flags(0, &env->active_fpu.fp_status);
2639 fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2640 update_fcr31();
2641 return fst2;
2644 uint64_t helper_float_recip1_ps(uint64_t fdt0)
2646 uint32_t fst2;
2647 uint32_t fsth2;
2649 set_float_exception_flags(0, &env->active_fpu.fp_status);
2650 fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2651 fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2652 update_fcr31();
2653 return ((uint64_t)fsth2 << 32) | fst2;
2656 uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
2658 uint64_t fdt2;
2660 set_float_exception_flags(0, &env->active_fpu.fp_status);
2661 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2662 fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2663 update_fcr31();
2664 return fdt2;
2667 uint32_t helper_float_rsqrt1_s(uint32_t fst0)
2669 uint32_t fst2;
2671 set_float_exception_flags(0, &env->active_fpu.fp_status);
2672 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2673 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2674 update_fcr31();
2675 return fst2;
2678 uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
2680 uint32_t fst2;
2681 uint32_t fsth2;
2683 set_float_exception_flags(0, &env->active_fpu.fp_status);
2684 fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2685 fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2686 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2687 fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2688 update_fcr31();
2689 return ((uint64_t)fsth2 << 32) | fst2;
/* NOTE(review): FLOAT_OP appears unused in this section — candidate for
 * removal; confirm against the rest of the file before deleting. */
#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
/* binary operations */
/*
 * FLOAT_BINOP(name) expands to three helpers — _d (float64), _s (float32)
 * and _ps (both halves of a paired single; low word in bits 31:0).  Each
 * clears the softfloat exception flags, performs the operation, folds the
 * accumulated flags into fcr31 via update_fcr31(), and substitutes the
 * default QNaN when the Invalid Operation cause bit was raised.
 */
#define FLOAT_BINOP(name)                                                  \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)         \
{                                                                          \
    uint64_t dt2;                                                          \
                                                                           \
    set_float_exception_flags(0, &env->active_fpu.fp_status);              \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);       \
    update_fcr31();                                                        \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                  \
        dt2 = FLOAT_QNAN64;                                                \
    return dt2;                                                            \
}                                                                          \
                                                                           \
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)         \
{                                                                          \
    uint32_t wt2;                                                          \
                                                                           \
    set_float_exception_flags(0, &env->active_fpu.fp_status);              \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);       \
    update_fcr31();                                                        \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                  \
        wt2 = FLOAT_QNAN32;                                                \
    return wt2;                                                            \
}                                                                          \
                                                                           \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)        \
{                                                                          \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                     \
    uint32_t fsth0 = fdt0 >> 32;                                           \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                     \
    uint32_t fsth1 = fdt1 >> 32;                                           \
    uint32_t wt2;                                                          \
    uint32_t wth2;                                                         \
                                                                           \
    set_float_exception_flags(0, &env->active_fpu.fp_status);              \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);       \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);    \
    update_fcr31();                                                        \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {                \
        wt2 = FLOAT_QNAN32;                                                \
        wth2 = FLOAT_QNAN32;                                               \
    }                                                                      \
    return ((uint64_t)wth2 << 32) | wt2;                                   \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP
/* ternary operations */
/*
 * FLOAT_TERNOP(name1, name2) expands to fused-style helpers computing
 * name2(name1(a, b), c) for _d, _s and _ps operand formats (e.g. muladd,
 * mulsub).  NOTE(review): unlike FLOAT_BINOP, these do not clear the
 * exception flags first and never call update_fcr31(), so no FP exception
 * is ever reported for these ops — confirm whether that is intentional.
 */
#define FLOAT_TERNOP(name1, name2)                                         \
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                               uint64_t fdt2)              \
{                                                                          \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);     \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);     \
}                                                                          \
                                                                           \
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                               uint32_t fst2)              \
{                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);     \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);     \
}                                                                          \
                                                                           \
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                                uint64_t fdt2)             \
{                                                                          \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                     \
    uint32_t fsth0 = fdt0 >> 32;                                           \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                     \
    uint32_t fsth1 = fdt1 >> 32;                                           \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                     \
    uint32_t fsth2 = fdt2 >> 32;                                           \
                                                                           \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);     \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);  \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);     \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);  \
    return ((uint64_t)fsth2 << 32) | fst2;                                 \
}

FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP
/* negated ternary operations */
/*
 * FLOAT_NTERNOP(name1, name2) expands to negated fused-style helpers
 * computing -(name2(name1(a, b), c)) for _d, _s and _ps formats (e.g.
 * nmuladd, nmulsub).  The negation is a sign-bit flip via float64_chs /
 * float32_chs.  NOTE(review): as with FLOAT_TERNOP, no flag reset and no
 * update_fcr31() call here — confirm whether that is intentional.
 */
#define FLOAT_NTERNOP(name1, name2)                                        \
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                                uint64_t fdt2)             \
{                                                                          \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);     \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);     \
    return float64_chs(fdt2);                                              \
}                                                                          \
                                                                           \
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                                uint32_t fst2)             \
{                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);     \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);     \
    return float32_chs(fst2);                                              \
}                                                                          \
                                                                           \
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                                 uint64_t fdt2)            \
{                                                                          \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                     \
    uint32_t fsth0 = fdt0 >> 32;                                           \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                     \
    uint32_t fsth1 = fdt1 >> 32;                                           \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                     \
    uint32_t fsth2 = fdt2 >> 32;                                           \
                                                                           \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);     \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);  \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);     \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);  \
    fst2 = float32_chs(fst2);                                              \
    fsth2 = float32_chs(fsth2);                                            \
    return ((uint64_t)fsth2 << 32) | fst2;                                 \
}

FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP
2824 /* MIPS specific binary operations */
2825 uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
2827 set_float_exception_flags(0, &env->active_fpu.fp_status);
2828 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2829 fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
2830 update_fcr31();
2831 return fdt2;
2834 uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
2836 set_float_exception_flags(0, &env->active_fpu.fp_status);
2837 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2838 fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2839 update_fcr31();
2840 return fst2;
2843 uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
2845 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2846 uint32_t fsth0 = fdt0 >> 32;
2847 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2848 uint32_t fsth2 = fdt2 >> 32;
2850 set_float_exception_flags(0, &env->active_fpu.fp_status);
2851 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2852 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2853 fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
2854 fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
2855 update_fcr31();
2856 return ((uint64_t)fsth2 << 32) | fst2;
2859 uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
2861 set_float_exception_flags(0, &env->active_fpu.fp_status);
2862 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
2863 fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
2864 fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
2865 update_fcr31();
2866 return fdt2;
2869 uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
2871 set_float_exception_flags(0, &env->active_fpu.fp_status);
2872 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2873 fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2874 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2875 update_fcr31();
2876 return fst2;
2879 uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
2881 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2882 uint32_t fsth0 = fdt0 >> 32;
2883 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
2884 uint32_t fsth2 = fdt2 >> 32;
2886 set_float_exception_flags(0, &env->active_fpu.fp_status);
2887 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
2888 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
2889 fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
2890 fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
2891 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
2892 fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
2893 update_fcr31();
2894 return ((uint64_t)fsth2 << 32) | fst2;
2897 uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
2899 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2900 uint32_t fsth0 = fdt0 >> 32;
2901 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
2902 uint32_t fsth1 = fdt1 >> 32;
2903 uint32_t fst2;
2904 uint32_t fsth2;
2906 set_float_exception_flags(0, &env->active_fpu.fp_status);
2907 fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
2908 fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
2909 update_fcr31();
2910 return ((uint64_t)fsth2 << 32) | fst2;
2913 uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
2915 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
2916 uint32_t fsth0 = fdt0 >> 32;
2917 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
2918 uint32_t fsth1 = fdt1 >> 32;
2919 uint32_t fst2;
2920 uint32_t fsth2;
2922 set_float_exception_flags(0, &env->active_fpu.fp_status);
2923 fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
2924 fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
2925 update_fcr31();
2926 return ((uint64_t)fsth2 << 32) | fst2;
/* compare operations */
/*
 * FOP_COND_D(op, cond) expands to two double-precision compare helpers:
 * helper_cmp_d_<op> and helper_cmpabs_d_<op> (the latter compares absolute
 * values).  Each clears the softfloat flags, evaluates "cond" (which names
 * fdt0/fdt1), folds flags into fcr31 and then sets or clears FP condition
 * code "cc" accordingly.
 */
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered_quiet() is still called. */
FOP_COND_D(f,   (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un,  float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq,  float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered() is still called. */
FOP_COND_D(sf,  (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt,  float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le,  float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
/*
 * FOP_COND_S(op, cond) expands to two single-precision compare helpers:
 * helper_cmp_s_<op> and helper_cmpabs_s_<op> (the latter compares absolute
 * values).  Each clears the softfloat flags, evaluates "cond" (which names
 * fst0/fst1), folds flags into fcr31 and then sets or clears FP condition
 * code "cc" accordingly.
 */
#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    set_float_exception_flags(0, &env->active_fpu.fp_status);  \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_S(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq,  float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered() is still called. */
FOP_COND_S(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt,  float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le,  float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3024 #define FOP_COND_PS(op, condl, condh) \
3025 void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3027 uint32_t fst0, fsth0, fst1, fsth1; \
3028 int ch, cl; \
3029 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3030 fst0 = fdt0 & 0XFFFFFFFF; \
3031 fsth0 = fdt0 >> 32; \
3032 fst1 = fdt1 & 0XFFFFFFFF; \
3033 fsth1 = fdt1 >> 32; \
3034 cl = condl; \
3035 ch = condh; \
3036 update_fcr31(); \
3037 if (cl) \
3038 SET_FP_COND(cc, env->active_fpu); \
3039 else \
3040 CLEAR_FP_COND(cc, env->active_fpu); \
3041 if (ch) \
3042 SET_FP_COND(cc + 1, env->active_fpu); \
3043 else \
3044 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3046 void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3048 uint32_t fst0, fsth0, fst1, fsth1; \
3049 int ch, cl; \
3050 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
3051 fsth0 = float32_abs(fdt0 >> 32); \
3052 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
3053 fsth1 = float32_abs(fdt1 >> 32); \
3054 cl = condl; \
3055 ch = condh; \
3056 update_fcr31(); \
3057 if (cl) \
3058 SET_FP_COND(cc, env->active_fpu); \
3059 else \
3060 CLEAR_FP_COND(cc, env->active_fpu); \
3061 if (ch) \
3062 SET_FP_COND(cc + 1, env->active_fpu); \
3063 else \
3064 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3067 /* NOTE: the comma operator will make "cond" to eval to false,
3068 * but float32_unordered_quiet() is still called. */
3069 FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
3070 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3071 FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
3072 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
3073 FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3074 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3075 FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3076 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3077 FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3078 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3079 FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3080 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3081 FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3082 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3083 FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3084 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3085 /* NOTE: the comma operator will make "cond" to eval to false,
3086 * but float32_unordered() is still called. */
3087 FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
3088 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3089 FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
3090 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
3091 FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3092 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3093 FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3094 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3095 FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3096 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3097 FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3098 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3099 FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status),
3100 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3101 FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3102 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))