/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <string.h>
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
# define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
# define LOG_SWTLB(...) do { } while (0)
#endif
/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
/*****************************************************************************/
/* SPR accesses */

void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " ADDRX "\n",
             sprn, sprn, env->spr[sprn]);
}

void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " ADDRX "\n",
             sprn, sprn, env->spr[sprn]);
}

target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " ADDRX "\n",
                 __func__, val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
/*****************************************************************************/
/* Memory load and stores */

static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf)
        return (uint32_t)(addr + arg);
    else
#endif
        return addr + arg;
}
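/* Illustrative note (not in the original source): in 32-bit mode on a
 * 64-bit CPU (msr_sf clear), effective addresses wrap at 4 GiB, e.g.
 *     addr_add(0xFFFFFFFFULL, 4) == 0x00000003
 * while in 64-bit mode the full 64-bit sum is returned unchanged.
 */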
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}

void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
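/* Worked example for the residual-byte loop above: with nb == 2 and the
 * bytes 0xAA, 0xBB at addr, the target register ends up holding
 * 0xAABB0000 -- string bytes are packed from the most-significant end. */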
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}

static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve == addr)
        env->reserve = (target_ulong)-1ULL;
}
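/* Example of the alignment above: with a 32-byte line,
 * do_dcbz(0x1234, 32) zeroes the 32 bytes from 0x1220 to 0x123F and
 * drops any reservation that was held at that line address. */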
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}
void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    addr &= ~(env->icache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
// XXX: to be tested
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif

/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}
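/* Worked example of the XER[CA] logic above: sraw(0xFFFFFFF5, 1), i.e.
 * -11 >> 1, yields -6 and sets CA because the result is negative and a
 * non-zero bit (the low 1) was shifted out; sraw(0x00000016, 1) yields
 * 11 with CA clear, since the result is non-negative. */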
#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    return val;
}
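/* popcntb computes a per-byte population count in three SWAR steps
 * (bit pairs, then nibbles, then bytes); e.g. an input of 0x0F00FF01
 * produces 0x04000801 -- each result byte holds the number of bits set
 * in the corresponding input byte. */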
#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}

uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
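/* Resulting FPRF encodings produced above, for reference:
 *   0x00 sNaN (undefined)   0x11 qNaN
 *   0x09 -infinity          0x05 +infinity
 *   0x12 -zero              0x02 +zero
 *   0x18 -denormal          0x14 +denormal
 *   0x08 -normal            0x04 +normal
 */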
/* Floating-point invalid operations exception */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
static always_inline void fpscr_set_rounding_mode (void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}
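/* Mask semantics above: bit i of 'mask' selects the i-th 4-bit field of
 * FPSCR, counted from the least-significant end (mask 0x01 updates bits
 * 3:0, mask 0x80 bits 31:28); FEX and VX (bits 30:29) are carried over
 * from the previous value and recomputed afterwards. */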
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif
/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}
/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *      to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *      to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}
#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
#endif
static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}
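/* Note on do_fri above: the explicit rounding mode is installed only for
 * the single float64_round_to_int() call, and fpscr_set_rounding_mode()
 * then re-derives the mode from FPSCR[RN], so the override cannot leak
 * into subsequent floating-point operations. */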
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round to single precision */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}
/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }
    return farg.ll;
}
/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
/* frsqrte - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
/* fsel - fsel. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}
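/* fsel semantics above, spelled out: arg2 is selected when arg1 >= 0
 * (either signed zero counts as non-negative here), arg3 when arg1 < 0
 * or arg1 is any NaN; the selection itself raises no FP exception. */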
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

static always_inline void do_rfi (target_ulong nip, target_ulong msr,
                                  target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x0), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x0), 0);
}

void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x0), 0);
}
#endif
#endif
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
        break;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
        break;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
               env->icache_line_size : env->dcache_line_size;
        break;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
               env->icache_line_size : env->dcache_line_size;
        break;
    default:
        /* Undefined */
        return 0;
        break;
    }
}
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}

target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}

target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}

void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho! */
/* Real implementation uses a ROM table. Do the same. */
/* Extremely decomposed:
 *     return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* XXX: to be improved to check access rights when in user-mode */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    target_ulong val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}

void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
#if !defined(CONFIG_USER_ONLY)
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_rfci (void)
{
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi (void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci (void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
#endif
/* 440 specific */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
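/* Illustrative trace of helper_dlmzb (not in the original source): with
 * high = 0x41424300 the first loop passes three non-zero bytes, stops on
 * the zero in the last byte of 'high', and the helper returns 4 with
 * CR0 set to 0x4 (when update_Rc is non-zero) and XER[6:0] holding the
 * same count. */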
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                                \
    if (float32_is_nan(x) || float32_is_signaling_nan(x)) {     \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
    static always_inline to_type cvt##from##to (from_type x, int *sat)  \
    {                                                                   \
        to_type r;                                                      \
        if (use_min && x < min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (use_max && x > max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)

/* Work around gcc problems with the macro version */
static always_inline uint8_t cvtuhub(uint16_t x, int *sat)
{
    uint8_t r;

    if (x > UINT8_MAX) {
        r = UINT8_MAX;
        *sat = 1;
    } else {
        r = x;
    }
    return r;
}
//SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
#undef SATCVT
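/* For reference, SATCVT(sh, sb, ...) above expands to a saturating
 * int16_t -> int8_t converter; e.g. cvtshsb(300, &sat) returns 127
 * (INT8_MAX) and sets *sat, while cvtshsb(-5, &sat) returns -5 and
 * leaves *sat untouched. */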
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE

void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

#define STVE(name, access, swap, element)                       \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
        } else {                                                \
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
        }                                                       \
    }
#define I(x) (x)
STVE(stvebx, stb, I, u8)
STVE(stvehx, stw, bswap16, u16)
STVE(stvewx, stl, bswap32, u32)
#undef I
#undef STVE
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
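/* vaddcuw above extracts the carry-out of each 32-bit lane: a + b
 * overflows 32 bits exactly when b > 0xFFFFFFFF - a, i.e. when
 * ~a->u32[i] < b->u32[i], so each result lane holds 0 or 1. */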
#define VARITH_DO(name, op, element)                                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = a->element[i] op b->element[i];             \
        }                                                               \
    }
#define VARITH(suffix, element)                 \
    VARITH_DO(add##suffix, +, element)          \
    VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)          \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
2145 #define VAVG_DO(name, element, etype) \
2146 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2148 int i; \
2149 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2150 etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2151 r->element[i] = x >> 1; \
2155 #define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2156 VAVG_DO(avgs##type, signed_element, signed_type) \
2157 VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2158 VAVG(b, s8, int16_t, u8, uint16_t)
2159 VAVG(h, s16, int32_t, u16, uint32_t)
2160 VAVG(w, s32, int64_t, u32, uint64_t)
2161 #undef VAVG_DO
2162 #undef VAVG
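/* The widened sum plus one implements round-half-up averaging: for
 * vavgub, (250 + 251 + 1) >> 1 = 251, and the wider etype keeps
 * 255 + 255 + 1 from wrapping before the shift. */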
2164 #define VCF(suffix, cvt, element) \
2165 void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2167 int i; \
2168 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2169 float32 t = cvt(b->element[i], &env->vec_status); \
2170 r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2173 VCF(ux, uint32_to_float32, u32)
2174 VCF(sx, int32_to_float32, s32)
2175 #undef VCF
2177 #define VCMP_DO(suffix, compare, element, record) \
2178 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2180 uint32_t ones = (uint32_t)-1; \
2181 uint32_t all = ones; \
2182 uint32_t none = 0; \
2183 int i; \
2184 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2185 uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2186 switch (sizeof (a->element[0])) { \
2187 case 4: r->u32[i] = result; break; \
2188 case 2: r->u16[i] = result; break; \
2189 case 1: r->u8[i] = result; break; \
2191 all &= result; \
2192 none |= result; \
2194 if (record) { \
2195 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2198 #define VCMP(suffix, compare, element) \
2199 VCMP_DO(suffix, compare, element, 0) \
2200 VCMP_DO(suffix##_dot, compare, element, 1)
2201 VCMP(equb, ==, u8)
2202 VCMP(equh, ==, u16)
2203 VCMP(equw, ==, u32)
2204 VCMP(gtub, >, u8)
2205 VCMP(gtuh, >, u16)
2206 VCMP(gtuw, >, u32)
2207 VCMP(gtsb, >, s8)
2208 VCMP(gtsh, >, s16)
2209 VCMP(gtsw, >, s32)
2210 #undef VCMP_DO
2211 #undef VCMP
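/* For the dot forms, CR field 6 encodes the aggregate outcome: bit 3 is
 * set when the predicate held in every element, bit 1 when it held in
 * none (despite its name, the variable none accumulates whether any
 * element matched). E.g. vcmpequb. with exactly half the bytes equal
 * leaves env->crf[6] == 0. */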
2213 #define VCMPFP_DO(suffix, compare, order, record) \
2214 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2216 uint32_t ones = (uint32_t)-1; \
2217 uint32_t all = ones; \
2218 uint32_t none = 0; \
2219 int i; \
2220 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2221 uint32_t result; \
2222 int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
2223 if (rel == float_relation_unordered) { \
2224 result = 0; \
2225 } else if (rel compare order) { \
2226 result = ones; \
2227 } else { \
2228 result = 0; \
2230 r->u32[i] = result; \
2231 all &= result; \
2232 none |= result; \
2234 if (record) { \
2235 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2238 #define VCMPFP(suffix, compare, order) \
2239 VCMPFP_DO(suffix, compare, order, 0) \
2240 VCMPFP_DO(suffix##_dot, compare, order, 1)
2241 VCMPFP(eqfp, ==, float_relation_equal)
2242 VCMPFP(gefp, !=, float_relation_less)
2243 VCMPFP(gtfp, ==, float_relation_greater)
2244 #undef VCMPFP_DO
2245 #undef VCMPFP
2247 static always_inline void vcmpbfp_internal (ppc_avr_t *r, ppc_avr_t *a,
2248 ppc_avr_t *b, int record)
2250 int i;
2251 int all_in = 0;
2252 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2253 int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
2254 if (le_rel == float_relation_unordered) {
2255 r->u32[i] = 0xc0000000;
2256 /* ALL_IN does not need to be updated here. */
2257 } else {
2258 float32 bneg = float32_chs(b->f[i]);
2259 int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
2260 int le = le_rel != float_relation_greater;
2261 int ge = ge_rel != float_relation_less;
2262 r->u32[i] = ((!le) << 31) | ((!ge) << 30);
2263 all_in |= (!le | !ge);
2266 if (record) {
2267 env->crf[6] = (all_in == 0) << 1;
2271 void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2273 vcmpbfp_internal(r, a, b, 0);
2276 void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2278 vcmpbfp_internal(r, a, b, 1);
2281 #define VCT(suffix, satcvt, element) \
2282 void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2284 int i; \
2285 int sat = 0; \
2286 float_status s = env->vec_status; \
2287 set_float_rounding_mode(float_round_to_zero, &s); \
2288 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2289 if (float32_is_nan(b->f[i]) || \
2290 float32_is_signaling_nan(b->f[i])) { \
2291 r->element[i] = 0; \
2292 } else { \
2293 float64 t = float32_to_float64(b->f[i], &s); \
2294 int64_t j; \
2295 t = float64_scalbn(t, uim, &s); \
2296 j = float64_to_int64(t, &s); \
2297 r->element[i] = satcvt(j, &sat); \
2300 if (sat) { \
2301 env->vscr |= (1 << VSCR_SAT); \
2304 VCT(uxs, cvtsduw, u32)
2305 VCT(sxs, cvtsdsw, s32)
2306 #undef VCT
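/* Worked example: vctsxs with uim = 4 scales a lane of 1.5f by 2^4
 * (float64_scalbn gives 24.0) and truncates to 24; the int64
 * intermediate lets out-of-range values be detected and saturated by
 * cvtsdsw, which also sets VSCR[SAT]. */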
2308 void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2310 int i;
2311 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2312 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2313 /* Need to do the computation in higher precision and round
2314 * once at the end. */
2315 float64 af, bf, cf, t;
2316 af = float32_to_float64(a->f[i], &env->vec_status);
2317 bf = float32_to_float64(b->f[i], &env->vec_status);
2318 cf = float32_to_float64(c->f[i], &env->vec_status);
2319 t = float64_mul(af, cf, &env->vec_status);
2320 t = float64_add(t, bf, &env->vec_status);
2321 r->f[i] = float64_to_float32(t, &env->vec_status);
2326 void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2328 int sat = 0;
2329 int i;
2331 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2332 int32_t prod = a->s16[i] * b->s16[i];
2333 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2334 r->s16[i] = cvtswsh (t, &sat);
2337 if (sat) {
2338 env->vscr |= (1 << VSCR_SAT);
2342 void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2344 int sat = 0;
2345 int i;
2347 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2348 int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
2349 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2350 r->s16[i] = cvtswsh (t, &sat);
2353 if (sat) {
2354 env->vscr |= (1 << VSCR_SAT);
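/* Worked example for the pair above: with a = b = 0x4000 (0.5 in Q15),
 * prod = 0x10000000 and prod >> 15 = 0x2000; adding c = 0x7000 gives
 * 0x9000 = 36864 > INT16_MAX, so cvtswsh clamps the lane to 0x7fff and
 * sets VSCR[SAT]. vmhraddshs differs only in the 0x4000 rounding term. */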
2358 #define VMINMAX_DO(name, compare, element) \
2359 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2361 int i; \
2362 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2363 if (a->element[i] compare b->element[i]) { \
2364 r->element[i] = b->element[i]; \
2365 } else { \
2366 r->element[i] = a->element[i]; \
2370 #define VMINMAX(suffix, element) \
2371 VMINMAX_DO(min##suffix, >, element) \
2372 VMINMAX_DO(max##suffix, <, element)
2373 VMINMAX(sb, s8)
2374 VMINMAX(sh, s16)
2375 VMINMAX(sw, s32)
2376 VMINMAX(ub, u8)
2377 VMINMAX(uh, u16)
2378 VMINMAX(uw, u32)
2379 #undef VMINMAX_DO
2380 #undef VMINMAX
2382 #define VMINMAXFP(suffix, rT, rF) \
2383 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2385 int i; \
2386 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2387 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2388 if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2389 r->f[i] = rT->f[i]; \
2390 } else { \
2391 r->f[i] = rF->f[i]; \
2396 VMINMAXFP(minfp, a, b)
2397 VMINMAXFP(maxfp, b, a)
2398 #undef VMINMAXFP
2400 void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2402 int i;
2403 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2404 int32_t prod = a->s16[i] * b->s16[i];
2405 r->s16[i] = (int16_t) (prod + c->s16[i]);
2409 #define VMRG_DO(name, element, highp) \
2410 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2412 ppc_avr_t result; \
2413 int i; \
2414 size_t n_elems = ARRAY_SIZE(r->element); \
2415 for (i = 0; i < n_elems/2; i++) { \
2416 if (highp) { \
2417 result.element[i*2+HI_IDX] = a->element[i]; \
2418 result.element[i*2+LO_IDX] = b->element[i]; \
2419 } else { \
2420 result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2421 result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2424 *r = result; \
2426 #if defined(WORDS_BIGENDIAN)
2427 #define MRGHI 0
2428 #define MRGLO 1
2429 #else
2430 #define MRGHI 1
2431 #define MRGLO 0
2432 #endif
2433 #define VMRG(suffix, element) \
2434 VMRG_DO(mrgl##suffix, element, MRGHI) \
2435 VMRG_DO(mrgh##suffix, element, MRGLO)
2436 VMRG(b, u8)
2437 VMRG(h, u16)
2438 VMRG(w, u32)
2439 #undef VMRG_DO
2440 #undef VMRG
2441 #undef MRGHI
2442 #undef MRGLO
2444 void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2446 int32_t prod[16];
2447 int i;
2449 for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2450 prod[i] = (int32_t)a->s8[i] * b->u8[i];
2453 VECTOR_FOR_INORDER_I(i, s32) {
2454 r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2458 void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2460 int32_t prod[8];
2461 int i;
2463 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2464 prod[i] = a->s16[i] * b->s16[i];
2467 VECTOR_FOR_INORDER_I(i, s32) {
2468 r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
2472 void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2474 int32_t prod[8];
2475 int i;
2476 int sat = 0;
2478 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2479 prod[i] = (int32_t)a->s16[i] * b->s16[i];
2482 VECTOR_FOR_INORDER_I (i, s32) {
2483 int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
2484 r->u32[i] = cvtsdsw(t, &sat);
2487 if (sat) {
2488 env->vscr |= (1 << VSCR_SAT);
2492 void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2494 uint16_t prod[16];
2495 int i;
2497 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2498 prod[i] = a->u8[i] * b->u8[i];
2501 VECTOR_FOR_INORDER_I(i, u32) {
2502 r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2506 void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2508 uint32_t prod[8];
2509 int i;
2511 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2512 prod[i] = a->u16[i] * b->u16[i];
2515 VECTOR_FOR_INORDER_I(i, u32) {
2516 r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
2520 void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2522 uint32_t prod[8];
2523 int i;
2524 int sat = 0;
2526 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2527 prod[i] = a->u16[i] * b->u16[i];
2530 VECTOR_FOR_INORDER_I (i, s32) {
2531 uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
2532 r->u32[i] = cvtuduw(t, &sat);
2535 if (sat) {
2536 env->vscr |= (1 << VSCR_SAT);
2540 #define VMUL_DO(name, mul_element, prod_element, evenp) \
2541 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2543 int i; \
2544 VECTOR_FOR_INORDER_I(i, prod_element) { \
2545 if (evenp) { \
2546 r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2547 } else { \
2548 r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2552 #define VMUL(suffix, mul_element, prod_element) \
2553 VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2554 VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2555 VMUL(sb, s8, s16)
2556 VMUL(sh, s16, s32)
2557 VMUL(ub, u8, u16)
2558 VMUL(uh, u16, u32)
2559 #undef VMUL_DO
2560 #undef VMUL
2562 void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2564 int i;
2565 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2566 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2567 /* Need to do the computation in higher precision and round
2568 * once at the end. */
2569 float64 af, bf, cf, t;
2570 af = float32_to_float64(a->f[i], &env->vec_status);
2571 bf = float32_to_float64(b->f[i], &env->vec_status);
2572 cf = float32_to_float64(c->f[i], &env->vec_status);
2573 t = float64_mul(af, cf, &env->vec_status);
2574 t = float64_sub(t, bf, &env->vec_status);
2575 t = float64_chs(t);
2576 r->f[i] = float64_to_float32(t, &env->vec_status);
2581 void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2583 ppc_avr_t result;
2584 int i;
2585 VECTOR_FOR_INORDER_I (i, u8) {
2586 int s = c->u8[i] & 0x1f;
2587 #if defined(WORDS_BIGENDIAN)
2588 int index = s & 0xf;
2589 #else
2590 int index = 15 - (s & 0xf);
2591 #endif
2592 if (s & 0x10) {
2593 result.u8[i] = b->u8[index];
2594 } else {
2595 result.u8[i] = a->u8[index];
2598 *r = result;
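/* Illustrative: a control byte of 0x13 has bit 4 set, so it selects
 * source b at byte offset 3; the & 0x1f mask makes control values of
 * 0x20..0xff wrap around the 32-byte {a, b} pair, and the offset is
 * mirrored (15 - offset) on little-endian hosts. */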
2601 #if defined(WORDS_BIGENDIAN)
2602 #define PKBIG 1
2603 #else
2604 #define PKBIG 0
2605 #endif
2606 void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2608 int i, j;
2609 ppc_avr_t result;
2610 #if defined(WORDS_BIGENDIAN)
2611 const ppc_avr_t *x[2] = { a, b };
2612 #else
2613 const ppc_avr_t *x[2] = { b, a };
2614 #endif
2616 VECTOR_FOR_INORDER_I (i, u64) {
2617 VECTOR_FOR_INORDER_I (j, u32){
2618 uint32_t e = x[i]->u32[j];
2619 result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2620 ((e >> 6) & 0x3e0) |
2621 ((e >> 3) & 0x1f));
2624 *r = result;
2627 #define VPK(suffix, from, to, cvt, dosat) \
2628 void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2630 int i; \
2631 int sat = 0; \
2632 ppc_avr_t result; \
2633 ppc_avr_t *a0 = PKBIG ? a : b; \
2634 ppc_avr_t *a1 = PKBIG ? b : a; \
2635 VECTOR_FOR_INORDER_I (i, from) { \
2636 result.to[i] = cvt(a0->from[i], &sat); \
2637 result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
2639 *r = result; \
2640 if (dosat && sat) { \
2641 env->vscr |= (1 << VSCR_SAT); \
2644 #define I(x, y) (x)
2645 VPK(shss, s16, s8, cvtshsb, 1)
2646 VPK(shus, s16, u8, cvtshub, 1)
2647 VPK(swss, s32, s16, cvtswsh, 1)
2648 VPK(swus, s32, u16, cvtswuh, 1)
2649 VPK(uhus, u16, u8, cvtuhub, 1)
2650 VPK(uwus, u32, u16, cvtuwuh, 1)
2651 VPK(uhum, u16, u8, I, 0)
2652 VPK(uwum, u32, u16, I, 0)
2653 #undef I
2654 #undef VPK
2655 #undef PKBIG
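/* E.g. vpkshss packs the 16 signed halfwords of {a, b} into 16 signed
 * bytes: a lane holding 0x0123 saturates to 0x7f and sets VSCR[SAT],
 * whereas the modulo form vpkuhum would simply keep the low byte 0x23. */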
2657 void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
2659 int i;
2660 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2661 HANDLE_NAN1(r->f[i], b->f[i]) {
2662 r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
2667 #define VRFI(suffix, rounding) \
2668 void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2670 int i; \
2671 float_status s = env->vec_status; \
2672 set_float_rounding_mode(rounding, &s); \
2673 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2674 HANDLE_NAN1(r->f[i], b->f[i]) { \
2675 r->f[i] = float32_round_to_int (b->f[i], &s); \
2679 VRFI(n, float_round_nearest_even)
2680 VRFI(m, float_round_down)
2681 VRFI(p, float_round_up)
2682 VRFI(z, float_round_to_zero)
2683 #undef VRFI
2685 #define VROTATE(suffix, element) \
2686 void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2688 int i; \
2689 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2690 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2691 unsigned int shift = b->element[i] & mask; \
2692 r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2695 VROTATE(b, u8)
2696 VROTATE(h, u16)
2697 VROTATE(w, u32)
2698 #undef VROTATE
2700 void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
2702 int i;
2703 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2704 HANDLE_NAN1(r->f[i], b->f[i]) {
2705 float32 t = float32_sqrt(b->f[i], &env->vec_status);
2706 r->f[i] = float32_div(float32_one, t, &env->vec_status);
2711 void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2713 r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2714 r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
2717 void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
2719 int i;
2720 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2721 HANDLE_NAN1(r->f[i], b->f[i]) {
2722 r->f[i] = float32_log2(b->f[i], &env->vec_status);
2727 #if defined(WORDS_BIGENDIAN)
2728 #define LEFT 0
2729 #define RIGHT 1
2730 #else
2731 #define LEFT 1
2732 #define RIGHT 0
2733 #endif
2734 /* The specification says that the results are undefined unless all of
2735 * the shift counts are identical. We check that they are, to match
2736 * what real hardware appears to do. */
2737 #define VSHIFT(suffix, leftp) \
2738 void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2740 int shift = b->u8[LO_IDX*15] & 0x7; \
2741 int doit = 1; \
2742 int i; \
2743 for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
2744 doit = doit && ((b->u8[i] & 0x7) == shift); \
2746 if (doit) { \
2747 if (shift == 0) { \
2748 *r = *a; \
2749 } else if (leftp) { \
2750 uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
2751 r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
2752 r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
2753 } else { \
2754 uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
2755 r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
2756 r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
2760 VSHIFT(l, LEFT)
2761 VSHIFT(r, RIGHT)
2762 #undef VSHIFT
2763 #undef LEFT
2764 #undef RIGHT
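/* The two-limb shift above is a standard funnel shift: bits that fall
 * out of one u64 half re-enter the other. A scalar sketch of the
 * left-shift path (illustrative only; the macro handles shift == 0
 * separately, since a 64-bit shift by 64 would be undefined):
 */
#if 0
static void shl128_sketch(uint64_t *hi, uint64_t *lo, int shift /* 1..7 */)
{
    uint64_t carry = *lo >> (64 - shift);   /* bits leaving the low limb */
    *hi = (*hi << shift) | carry;           /* re-enter the high limb    */
    *lo <<= shift;
}
#endif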
2766 #define VSL(suffix, element) \
2767 void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2769 int i; \
2770 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2771 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2772 unsigned int shift = b->element[i] & mask; \
2773 r->element[i] = a->element[i] << shift; \
2776 VSL(b, u8)
2777 VSL(h, u16)
2778 VSL(w, u32)
2779 #undef VSL
2781 void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2783 int sh = shift & 0xf;
2784 int i;
2785 ppc_avr_t result;
2787 #if defined(WORDS_BIGENDIAN)
2788 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2789 int index = sh + i;
2790 if (index > 0xf) {
2791 result.u8[i] = b->u8[index-0x10];
2792 } else {
2793 result.u8[i] = a->u8[index];
2796 #else
2797 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2798 int index = (16 - sh) + i;
2799 if (index > 0xf) {
2800 result.u8[i] = a->u8[index-0x10];
2801 } else {
2802 result.u8[i] = b->u8[index];
2805 #endif
2806 *r = result;
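/* Illustrative: with shift = 3 the big-endian branch copies a->u8[3..15]
 * into result.u8[0..12] and b->u8[0..2] into result.u8[13..15], i.e. a
 * byte-granular left shift across the concatenated {a, b} pair; the
 * little-endian branch computes the mirrored indices. */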
2809 void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2811 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2813 #if defined (WORDS_BIGENDIAN)
2814 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2815 memset (&r->u8[16-sh], 0, sh);
2816 #else
2817 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2818 memset (&r->u8[0], 0, sh);
2819 #endif
2822 /* Experimental testing shows that hardware masks the immediate. */
2823 #define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2824 #if defined(WORDS_BIGENDIAN)
2825 #define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2826 #else
2827 #define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2828 #endif
2829 #define VSPLT(suffix, element) \
2830 void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2832 uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
2833 int i; \
2834 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2835 r->element[i] = s; \
2838 VSPLT(b, u8)
2839 VSPLT(h, u16)
2840 VSPLT(w, u32)
2841 #undef VSPLT
2842 #undef SPLAT_ELEMENT
2843 #undef _SPLAT_MASKED
2845 #define VSPLTI(suffix, element, splat_type) \
2846 void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
2848 splat_type x = (int8_t)(splat << 3) >> 3; \
2849 int i; \
2850 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2851 r->element[i] = x; \
2854 VSPLTI(b, s8, int8_t)
2855 VSPLTI(h, s16, int16_t)
2856 VSPLTI(w, s32, int32_t)
2857 #undef VSPLTI
2859 #define VSR(suffix, element) \
2860 void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2862 int i; \
2863 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2864 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2865 unsigned int shift = b->element[i] & mask; \
2866 r->element[i] = a->element[i] >> shift; \
2869 VSR(ab, s8)
2870 VSR(ah, s16)
2871 VSR(aw, s32)
2872 VSR(b, u8)
2873 VSR(h, u16)
2874 VSR(w, u32)
2875 #undef VSR
2877 void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2879 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2881 #if defined (WORDS_BIGENDIAN)
2882 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2883 memset (&r->u8[0], 0, sh);
2884 #else
2885 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2886 memset (&r->u8[16-sh], 0, sh);
2887 #endif
2890 void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2892 int i;
2893 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2894 r->u32[i] = a->u32[i] >= b->u32[i];
2898 void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2900 int64_t t;
2901 int i, upper;
2902 ppc_avr_t result;
2903 int sat = 0;
2905 #if defined(WORDS_BIGENDIAN)
2906 upper = ARRAY_SIZE(r->s32)-1;
2907 #else
2908 upper = 0;
2909 #endif
2910 t = (int64_t)b->s32[upper];
2911 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2912 t += a->s32[i];
2913 result.s32[i] = 0;
2915 result.s32[upper] = cvtsdsw(t, &sat);
2916 *r = result;
2918 if (sat) {
2919 env->vscr |= (1 << VSCR_SAT);
2923 void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2925 int i, j, upper;
2926 ppc_avr_t result;
2927 int sat = 0;
2929 #if defined(WORDS_BIGENDIAN)
2930 upper = 1;
2931 #else
2932 upper = 0;
2933 #endif
2934 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2935 int64_t t = (int64_t)b->s32[upper+i*2];
2936 result.u64[i] = 0;
2937 for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2938 t += a->s32[2*i+j];
2940 result.s32[upper+i*2] = cvtsdsw(t, &sat);
2943 *r = result;
2944 if (sat) {
2945 env->vscr |= (1 << VSCR_SAT);
2949 void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2951 int i, j;
2952 int sat = 0;
2954 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2955 int64_t t = (int64_t)b->s32[i];
2956 for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2957 t += a->s8[4*i+j];
2959 r->s32[i] = cvtsdsw(t, &sat);
2962 if (sat) {
2963 env->vscr |= (1 << VSCR_SAT);
2967 void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2969 int sat = 0;
2970 int i;
2972 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2973 int64_t t = (int64_t)b->s32[i];
2974 t += a->s16[2*i] + a->s16[2*i+1];
2975 r->s32[i] = cvtsdsw(t, &sat);
2978 if (sat) {
2979 env->vscr |= (1 << VSCR_SAT);
2983 void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2985 int i, j;
2986 int sat = 0;
2988 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2989 uint64_t t = (uint64_t)b->u32[i];
2990 for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
2991 t += a->u8[4*i+j];
2993 r->u32[i] = cvtuduw(t, &sat);
2996 if (sat) {
2997 env->vscr |= (1 << VSCR_SAT);
3001 #if defined(WORDS_BIGENDIAN)
3002 #define UPKHI 1
3003 #define UPKLO 0
3004 #else
3005 #define UPKHI 0
3006 #define UPKLO 1
3007 #endif
3008 #define VUPKPX(suffix, hi) \
3009 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3011 int i; \
3012 ppc_avr_t result; \
3013 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
3014 uint16_t e = b->u16[hi ? i : i+4]; \
3015 uint8_t a = (e >> 15) ? 0xff : 0; \
3016 uint8_t r = (e >> 10) & 0x1f; \
3017 uint8_t g = (e >> 5) & 0x1f; \
3018 uint8_t b = e & 0x1f; \
3019 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
3021 *r = result; \
3023 VUPKPX(lpx, UPKLO)
3024 VUPKPX(hpx, UPKHI)
3025 #undef VUPKPX
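/* Worked example: a 1/5/5/5 pixel of 0xfc00 unpacks to 0xff1f0000; the
 * sign bit expands to an 0xff/0x00 alpha byte and each 5-bit field
 * lands in the low bits of its own byte, with no range scaling. Note
 * that the locals r and b in the macro shadow the parameters; *r =
 * result runs after the loop, where only the parameter is in scope. */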
3027 #define VUPK(suffix, unpacked, packee, hi) \
3028 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3030 int i; \
3031 ppc_avr_t result; \
3032 if (hi) { \
3033 for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
3034 result.unpacked[i] = b->packee[i]; \
3036 } else { \
3037 for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
3038 result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
3041 *r = result; \
3043 VUPK(hsb, s16, s8, UPKHI)
3044 VUPK(hsh, s32, s16, UPKHI)
3045 VUPK(lsb, s16, s8, UPKLO)
3046 VUPK(lsh, s32, s16, UPKLO)
3047 #undef VUPK
3048 #undef UPKHI
3049 #undef UPKLO
3051 #undef DO_HANDLE_NAN
3052 #undef HANDLE_NAN1
3053 #undef HANDLE_NAN2
3054 #undef HANDLE_NAN3
3055 #undef VECTOR_FOR_INORDER_I
3056 #undef HI_IDX
3057 #undef LO_IDX
3059 /*****************************************************************************/
3060 /* SPE extension helpers */
3061 /* Use a table to make this quicker */
3062 static uint8_t hbrev[16] = {
3063 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
3064 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
3067 static always_inline uint8_t byte_reverse (uint8_t val)
3069 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
3072 static always_inline uint32_t word_reverse (uint32_t val)
3074 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
3075 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
3078 #define MASKBITS 16 // Arbitrary value - to be fixed (implementation dependent)
3079 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
3081 uint32_t a, b, d, mask;
3083 mask = UINT32_MAX >> (32 - MASKBITS);
3084 a = arg1 & mask;
3085 b = arg2 & mask;
3086 d = word_reverse(1 + word_reverse(a | ~b));
3087 return (arg1 & ~mask) | (d & b);
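/* Worked example: with b = 0xf (four mask bits) and a = 0x5, the 4-bit
 * reversal of a is 0b1010; incrementing gives 0b1011, and reversing
 * back yields 0b1101 = 0xd, the next index of a bit-reversed
 * (FFT-style) address sequence. */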
3090 uint32_t helper_cntlsw32 (uint32_t val)
3092 if (val & 0x80000000)
3093 return clz32(~val);
3094 else
3095 return clz32(val);
3098 uint32_t helper_cntlzw32 (uint32_t val)
3100 return clz32(val);
3103 /* Single-precision floating-point conversions */
3104 static always_inline uint32_t efscfsi (uint32_t val)
3106 CPU_FloatU u;
3108 u.f = int32_to_float32(val, &env->vec_status);
3110 return u.l;
3113 static always_inline uint32_t efscfui (uint32_t val)
3115 CPU_FloatU u;
3117 u.f = uint32_to_float32(val, &env->vec_status);
3119 return u.l;
3122 static always_inline int32_t efsctsi (uint32_t val)
3124 CPU_FloatU u;
3126 u.l = val;
3127 /* NaNs are not treated the way IEEE 754 specifies */
3128 if (unlikely(float32_is_nan(u.f)))
3129 return 0;
3131 return float32_to_int32(u.f, &env->vec_status);
3134 static always_inline uint32_t efsctui (uint32_t val)
3136 CPU_FloatU u;
3138 u.l = val;
3139 /* NaNs are not treated the way IEEE 754 specifies */
3140 if (unlikely(float32_is_nan(u.f)))
3141 return 0;
3143 return float32_to_uint32(u.f, &env->vec_status);
3146 static always_inline uint32_t efsctsiz (uint32_t val)
3148 CPU_FloatU u;
3150 u.l = val;
3151 /* NaNs are not treated the way IEEE 754 specifies */
3152 if (unlikely(float32_is_nan(u.f)))
3153 return 0;
3155 return float32_to_int32_round_to_zero(u.f, &env->vec_status);
3158 static always_inline uint32_t efsctuiz (uint32_t val)
3160 CPU_FloatU u;
3162 u.l = val;
3163 /* NaNs are not treated the way IEEE 754 specifies */
3164 if (unlikely(float32_is_nan(u.f)))
3165 return 0;
3167 return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
3170 static always_inline uint32_t efscfsf (uint32_t val)
3172 CPU_FloatU u;
3173 float32 tmp;
3175 u.f = int32_to_float32(val, &env->vec_status);
3176 tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3177 u.f = float32_div(u.f, tmp, &env->vec_status);
3179 return u.l;
3182 static always_inline uint32_t efscfuf (uint32_t val)
3184 CPU_FloatU u;
3185 float32 tmp;
3187 u.f = uint32_to_float32(val, &env->vec_status);
3188 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3189 u.f = float32_div(u.f, tmp, &env->vec_status);
3191 return u.l;
3194 static always_inline uint32_t efsctsf (uint32_t val)
3196 CPU_FloatU u;
3197 float32 tmp;
3199 u.l = val;
3200 /* NaNs are not treated the way IEEE 754 specifies */
3201 if (unlikely(float32_is_nan(u.f)))
3202 return 0;
3203 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3204 u.f = float32_mul(u.f, tmp, &env->vec_status);
3206 return float32_to_int32(u.f, &env->vec_status);
3209 static always_inline uint32_t efsctuf (uint32_t val)
3211 CPU_FloatU u;
3212 float32 tmp;
3214 u.l = val;
3215 /* NaNs are not treated the way IEEE 754 specifies */
3216 if (unlikely(float32_is_nan(u.f)))
3217 return 0;
3218 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3219 u.f = float32_mul(u.f, tmp, &env->vec_status);
3221 return float32_to_uint32(u.f, &env->vec_status);
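/* The *f variants treat the 32-bit operand as a fixed-point fraction:
 * dividing the converted value by 2^32, or multiplying by 2^32 before
 * converting back, performs the rescaling, so e.g. efscfuf(0x80000000)
 * yields 0.5f. */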
3224 #define HELPER_SPE_SINGLE_CONV(name) \
3225 uint32_t helper_e##name (uint32_t val) \
3227 return e##name(val); \
3229 /* efscfsi */
3230 HELPER_SPE_SINGLE_CONV(fscfsi);
3231 /* efscfui */
3232 HELPER_SPE_SINGLE_CONV(fscfui);
3233 /* efscfuf */
3234 HELPER_SPE_SINGLE_CONV(fscfuf);
3235 /* efscfsf */
3236 HELPER_SPE_SINGLE_CONV(fscfsf);
3237 /* efsctsi */
3238 HELPER_SPE_SINGLE_CONV(fsctsi);
3239 /* efsctui */
3240 HELPER_SPE_SINGLE_CONV(fsctui);
3241 /* efsctsiz */
3242 HELPER_SPE_SINGLE_CONV(fsctsiz);
3243 /* efsctuiz */
3244 HELPER_SPE_SINGLE_CONV(fsctuiz);
3245 /* efsctsf */
3246 HELPER_SPE_SINGLE_CONV(fsctsf);
3247 /* efsctuf */
3248 HELPER_SPE_SINGLE_CONV(fsctuf);
3250 #define HELPER_SPE_VECTOR_CONV(name) \
3251 uint64_t helper_ev##name (uint64_t val) \
3253 return ((uint64_t)e##name(val >> 32) << 32) | \
3254 (uint64_t)e##name(val); \
3256 /* evfscfsi */
3257 HELPER_SPE_VECTOR_CONV(fscfsi);
3258 /* evfscfui */
3259 HELPER_SPE_VECTOR_CONV(fscfui);
3260 /* evfscfuf */
3261 HELPER_SPE_VECTOR_CONV(fscfuf);
3262 /* evfscfsf */
3263 HELPER_SPE_VECTOR_CONV(fscfsf);
3264 /* evfsctsi */
3265 HELPER_SPE_VECTOR_CONV(fsctsi);
3266 /* evfsctui */
3267 HELPER_SPE_VECTOR_CONV(fsctui);
3268 /* evfsctsiz */
3269 HELPER_SPE_VECTOR_CONV(fsctsiz);
3270 /* evfsctuiz */
3271 HELPER_SPE_VECTOR_CONV(fsctuiz);
3272 /* evfsctsf */
3273 HELPER_SPE_VECTOR_CONV(fsctsf);
3274 /* evfsctuf */
3275 HELPER_SPE_VECTOR_CONV(fsctuf);
3277 /* Single-precision floating-point arithmetic */
3278 static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
3280 CPU_FloatU u1, u2;
3281 u1.l = op1;
3282 u2.l = op2;
3283 u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3284 return u1.l;
3287 static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
3289 CPU_FloatU u1, u2;
3290 u1.l = op1;
3291 u2.l = op2;
3292 u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3293 return u1.l;
3296 static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
3298 CPU_FloatU u1, u2;
3299 u1.l = op1;
3300 u2.l = op2;
3301 u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3302 return u1.l;
3305 static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
3307 CPU_FloatU u1, u2;
3308 u1.l = op1;
3309 u2.l = op2;
3310 u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3311 return u1.l;
3314 #define HELPER_SPE_SINGLE_ARITH(name) \
3315 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3317 return e##name(op1, op2); \
3319 /* efsadd */
3320 HELPER_SPE_SINGLE_ARITH(fsadd);
3321 /* efssub */
3322 HELPER_SPE_SINGLE_ARITH(fssub);
3323 /* efsmul */
3324 HELPER_SPE_SINGLE_ARITH(fsmul);
3325 /* efsdiv */
3326 HELPER_SPE_SINGLE_ARITH(fsdiv);
3328 #define HELPER_SPE_VECTOR_ARITH(name) \
3329 uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
3331 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
3332 (uint64_t)e##name(op1, op2); \
3334 /* evfsadd */
3335 HELPER_SPE_VECTOR_ARITH(fsadd);
3336 /* evfssub */
3337 HELPER_SPE_VECTOR_ARITH(fssub);
3338 /* evfsmul */
3339 HELPER_SPE_VECTOR_ARITH(fsmul);
3340 /* evfsdiv */
3341 HELPER_SPE_VECTOR_ARITH(fsdiv);
3343 /* Single-precision floating-point comparisons */
3344 static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
3346 CPU_FloatU u1, u2;
3347 u1.l = op1;
3348 u2.l = op2;
3349 return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3352 static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
3354 CPU_FloatU u1, u2;
3355 u1.l = op1;
3356 u2.l = op2;
3357 return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3360 static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
3362 CPU_FloatU u1, u2;
3363 u1.l = op1;
3364 u2.l = op2;
3365 return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3368 static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
3370 /* XXX: TODO: test special values (NaN, infinities, ...) */
3371 return efststlt(op1, op2);
3374 static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
3376 /* XXX: TODO: test special values (NaN, infinities, ...) */
3377 return efststgt(op1, op2);
3380 static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
3382 /* XXX: TODO: test special values (NaN, infinities, ...) */
3383 return efststeq(op1, op2);
3386 #define HELPER_SINGLE_SPE_CMP(name) \
3387 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3389 return e##name(op1, op2) << 2; \
3391 /* efststlt */
3392 HELPER_SINGLE_SPE_CMP(fststlt);
3393 /* efststgt */
3394 HELPER_SINGLE_SPE_CMP(fststgt);
3395 /* efststeq */
3396 HELPER_SINGLE_SPE_CMP(fststeq);
3397 /* efscmplt */
3398 HELPER_SINGLE_SPE_CMP(fscmplt);
3399 /* efscmpgt */
3400 HELPER_SINGLE_SPE_CMP(fscmpgt);
3401 /* efscmpeq */
3402 HELPER_SINGLE_SPE_CMP(fscmpeq);
3404 static always_inline uint32_t evcmp_merge (int t0, int t1)
3406 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
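/* E.g. evcmp_merge(1, 0) == 0b1010: bit 3 is the high-word result,
 * bit 2 the low-word result, bit 1 their OR and bit 0 their AND. */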
3409 #define HELPER_VECTOR_SPE_CMP(name) \
3410 uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
3412 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
3414 /* evfststlt */
3415 HELPER_VECTOR_SPE_CMP(fststlt);
3416 /* evfststgt */
3417 HELPER_VECTOR_SPE_CMP(fststgt);
3418 /* evfststeq */
3419 HELPER_VECTOR_SPE_CMP(fststeq);
3420 /* evfscmplt */
3421 HELPER_VECTOR_SPE_CMP(fscmplt);
3422 /* evfscmpgt */
3423 HELPER_VECTOR_SPE_CMP(fscmpgt);
3424 /* evfscmpeq */
3425 HELPER_VECTOR_SPE_CMP(fscmpeq);
3427 /* Double-precision floating-point conversions */
3428 uint64_t helper_efdcfsi (uint32_t val)
3430 CPU_DoubleU u;
3432 u.d = int32_to_float64(val, &env->vec_status);
3434 return u.ll;
3437 uint64_t helper_efdcfsid (uint64_t val)
3439 CPU_DoubleU u;
3441 u.d = int64_to_float64(val, &env->vec_status);
3443 return u.ll;
3446 uint64_t helper_efdcfui (uint32_t val)
3448 CPU_DoubleU u;
3450 u.d = uint32_to_float64(val, &env->vec_status);
3452 return u.ll;
3455 uint64_t helper_efdcfuid (uint64_t val)
3457 CPU_DoubleU u;
3459 u.d = uint64_to_float64(val, &env->vec_status);
3461 return u.ll;
3464 uint32_t helper_efdctsi (uint64_t val)
3466 CPU_DoubleU u;
3468 u.ll = val;
3469 /* NaNs are not treated the way IEEE 754 specifies */
3470 if (unlikely(float64_is_nan(u.d)))
3471 return 0;
3473 return float64_to_int32(u.d, &env->vec_status);
3476 uint32_t helper_efdctui (uint64_t val)
3478 CPU_DoubleU u;
3480 u.ll = val;
3481 /* NaNs are not treated the way IEEE 754 specifies */
3482 if (unlikely(float64_is_nan(u.d)))
3483 return 0;
3485 return float64_to_uint32(u.d, &env->vec_status);
3488 uint32_t helper_efdctsiz (uint64_t val)
3490 CPU_DoubleU u;
3492 u.ll = val;
3493 /* NaNs are not treated the way IEEE 754 specifies */
3494 if (unlikely(float64_is_nan(u.d)))
3495 return 0;
3497 return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3500 uint64_t helper_efdctsidz (uint64_t val)
3502 CPU_DoubleU u;
3504 u.ll = val;
3505 /* NaNs are not treated the way IEEE 754 specifies */
3506 if (unlikely(float64_is_nan(u.d)))
3507 return 0;
3509 return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3512 uint32_t helper_efdctuiz (uint64_t val)
3514 CPU_DoubleU u;
3516 u.ll = val;
3517 /* NaNs are not treated the way IEEE 754 specifies */
3518 if (unlikely(float64_is_nan(u.d)))
3519 return 0;
3521 return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3524 uint64_t helper_efdctuidz (uint64_t val)
3526 CPU_DoubleU u;
3528 u.ll = val;
3529 /* NaNs are not treated the way IEEE 754 specifies */
3530 if (unlikely(float64_is_nan(u.d)))
3531 return 0;
3533 return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3536 uint64_t helper_efdcfsf (uint32_t val)
3538 CPU_DoubleU u;
3539 float64 tmp;
3541 u.d = int32_to_float64(val, &env->vec_status);
3542 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3543 u.d = float64_div(u.d, tmp, &env->vec_status);
3545 return u.ll;
3548 uint64_t helper_efdcfuf (uint32_t val)
3550 CPU_DoubleU u;
3551 float64 tmp;
3553 u.d = uint32_to_float64(val, &env->vec_status);
3554 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3555 u.d = float64_div(u.d, tmp, &env->vec_status);
3557 return u.ll;
3560 uint32_t helper_efdctsf (uint64_t val)
3562 CPU_DoubleU u;
3563 float64 tmp;
3565 u.ll = val;
3566 /* NaNs are not treated the way IEEE 754 specifies */
3567 if (unlikely(float64_is_nan(u.d)))
3568 return 0;
3569 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3570 u.d = float64_mul(u.d, tmp, &env->vec_status);
3572 return float64_to_int32(u.d, &env->vec_status);
3575 uint32_t helper_efdctuf (uint64_t val)
3577 CPU_DoubleU u;
3578 float64 tmp;
3580 u.ll = val;
3581 /* NaNs are not treated the way IEEE 754 specifies */
3582 if (unlikely(float64_is_nan(u.d)))
3583 return 0;
3584 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3585 u.d = float64_mul(u.d, tmp, &env->vec_status);
3587 return float64_to_uint32(u.d, &env->vec_status);
3590 uint32_t helper_efscfd (uint64_t val)
3592 CPU_DoubleU u1;
3593 CPU_FloatU u2;
3595 u1.ll = val;
3596 u2.f = float64_to_float32(u1.d, &env->vec_status);
3598 return u2.l;
3601 uint64_t helper_efdcfs (uint32_t val)
3603 CPU_DoubleU u2;
3604 CPU_FloatU u1;
3606 u1.l = val;
3607 u2.d = float32_to_float64(u1.f, &env->vec_status);
3609 return u2.ll;
3612 /* Double-precision floating-point arithmetic */
3613 uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3615 CPU_DoubleU u1, u2;
3616 u1.ll = op1;
3617 u2.ll = op2;
3618 u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3619 return u1.ll;
3622 uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3624 CPU_DoubleU u1, u2;
3625 u1.ll = op1;
3626 u2.ll = op2;
3627 u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3628 return u1.ll;
3631 uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3633 CPU_DoubleU u1, u2;
3634 u1.ll = op1;
3635 u2.ll = op2;
3636 u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3637 return u1.ll;
3640 uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3642 CPU_DoubleU u1, u2;
3643 u1.ll = op1;
3644 u2.ll = op2;
3645 u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3646 return u1.ll;
3649 /* Double-precision floating-point comparisons */
3650 uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3652 CPU_DoubleU u1, u2;
3653 u1.ll = op1;
3654 u2.ll = op2;
3655 return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3658 uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3660 CPU_DoubleU u1, u2;
3661 u1.ll = op1;
3662 u2.ll = op2;
3663 return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3666 uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3668 CPU_DoubleU u1, u2;
3669 u1.ll = op1;
3670 u2.ll = op2;
3671 return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3674 uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
3676 /* XXX: TODO: test special values (NaN, infinities, ...) */
3677 return helper_efdtstlt(op1, op2);
3680 uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
3682 /* XXX: TODO: test special values (NaN, infinities, ...) */
3683 return helper_efdtstgt(op1, op2);
3686 uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
3688 /* XXX: TODO: test special values (NaN, infinities, ...) */
3689 return helper_efdtsteq(op1, op2);
3692 /*****************************************************************************/
3693 /* Softmmu support */
3694 #if !defined (CONFIG_USER_ONLY)
3696 #define MMUSUFFIX _mmu
3698 #define SHIFT 0
3699 #include "softmmu_template.h"
3701 #define SHIFT 1
3702 #include "softmmu_template.h"
3704 #define SHIFT 2
3705 #include "softmmu_template.h"
3707 #define SHIFT 3
3708 #include "softmmu_template.h"
3710 /* Try to fill the TLB and raise an exception on error. If retaddr is
3711 NULL, the function was called from C code (i.e. not from generated
3712 code or from helper.c) */
3713 /* XXX: fix it to restore all registers */
3714 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3716 TranslationBlock *tb;
3717 CPUState *saved_env;
3718 unsigned long pc;
3719 int ret;
3721 /* XXX: hack to restore env in all cases, even if not called from
3722 generated code */
3723 saved_env = env;
3724 env = cpu_single_env;
3725 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3726 if (unlikely(ret != 0)) {
3727 if (likely(retaddr)) {
3728 /* now we have a real cpu fault */
3729 pc = (unsigned long)retaddr;
3730 tb = tb_find_pc(pc);
3731 if (likely(tb)) {
3732 /* the PC is inside the translated code. It means that we have
3733 a virtual CPU fault */
3734 cpu_restore_state(tb, env, pc, NULL);
3737 helper_raise_exception_err(env->exception_index, env->error_code);
3739 env = saved_env;
3742 /* Segment registers load and store */
3743 target_ulong helper_load_sr (target_ulong sr_num)
3745 #if defined(TARGET_PPC64)
3746 if (env->mmu_model & POWERPC_MMU_64)
3747 return ppc_load_sr(env, sr_num);
3748 #endif
3749 return env->sr[sr_num];
3752 void helper_store_sr (target_ulong sr_num, target_ulong val)
3754 ppc_store_sr(env, sr_num, val);
3757 /* SLB management */
3758 #if defined(TARGET_PPC64)
3759 target_ulong helper_load_slb (target_ulong slb_nr)
3761 return ppc_load_slb(env, slb_nr);
3764 void helper_store_slb (target_ulong rb, target_ulong rs)
3766 ppc_store_slb(env, rb, rs);
3769 void helper_slbia (void)
3771 ppc_slb_invalidate_all(env);
3774 void helper_slbie (target_ulong addr)
3776 ppc_slb_invalidate_one(env, addr);
3779 #endif /* defined(TARGET_PPC64) */
3781 /* TLB management */
3782 void helper_tlbia (void)
3784 ppc_tlb_invalidate_all(env);
3787 void helper_tlbie (target_ulong addr)
3789 ppc_tlb_invalidate_one(env, addr);
3792 /* Software-driven TLB management */
3793 /* PowerPC 602/603 software TLB load instruction helpers */
3794 static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3796 target_ulong RPN, CMP, EPN;
3797 int way;
3799 RPN = env->spr[SPR_RPA];
3800 if (is_code) {
3801 CMP = env->spr[SPR_ICMP];
3802 EPN = env->spr[SPR_IMISS];
3803 } else {
3804 CMP = env->spr[SPR_DCMP];
3805 EPN = env->spr[SPR_DMISS];
3807 way = (env->spr[SPR_SRR1] >> 17) & 1;
3808 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3809 " PTE1 " ADDRX " way %d\n",
3810 __func__, new_EPN, EPN, CMP, RPN, way);
3811 /* Store this TLB */
3812 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3813 way, is_code, CMP, RPN);
3816 void helper_6xx_tlbd (target_ulong EPN)
3818 do_6xx_tlb(EPN, 0);
3821 void helper_6xx_tlbi (target_ulong EPN)
3823 do_6xx_tlb(EPN, 1);
3826 /* PowerPC 74xx software TLB load instruction helpers */
3827 static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3829 target_ulong RPN, CMP, EPN;
3830 int way;
3832 RPN = env->spr[SPR_PTELO];
3833 CMP = env->spr[SPR_PTEHI];
3834 EPN = env->spr[SPR_TLBMISS] & ~0x3;
3835 way = env->spr[SPR_TLBMISS] & 0x3;
3836 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3837 " PTE1 " ADDRX " way %d\n",
3838 __func__, new_EPN, EPN, CMP, RPN, way);
3839 /* Store this TLB */
3840 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3841 way, is_code, CMP, RPN);
3844 void helper_74xx_tlbd (target_ulong EPN)
3846 do_74xx_tlb(EPN, 0);
3849 void helper_74xx_tlbi (target_ulong EPN)
3851 do_74xx_tlb(EPN, 1);
3854 static always_inline target_ulong booke_tlb_to_page_size (int size)
3856 return 1024 << (2 * size);
3859 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
3861 int size;
3863 switch (page_size) {
3864 case 0x00000400UL:
3865 size = 0x0;
3866 break;
3867 case 0x00001000UL:
3868 size = 0x1;
3869 break;
3870 case 0x00004000UL:
3871 size = 0x2;
3872 break;
3873 case 0x00010000UL:
3874 size = 0x3;
3875 break;
3876 case 0x00040000UL:
3877 size = 0x4;
3878 break;
3879 case 0x00100000UL:
3880 size = 0x5;
3881 break;
3882 case 0x00400000UL:
3883 size = 0x6;
3884 break;
3885 case 0x01000000UL:
3886 size = 0x7;
3887 break;
3888 case 0x04000000UL:
3889 size = 0x8;
3890 break;
3891 case 0x10000000UL:
3892 size = 0x9;
3893 break;
3894 case 0x40000000UL:
3895 size = 0xA;
3896 break;
3897 #if defined (TARGET_PPC64)
3898 case 0x000100000000ULL:
3899 size = 0xB;
3900 break;
3901 case 0x000400000000ULL:
3902 size = 0xC;
3903 break;
3904 case 0x001000000000ULL:
3905 size = 0xD;
3906 break;
3907 case 0x004000000000ULL:
3908 size = 0xE;
3909 break;
3910 case 0x010000000000ULL:
3911 size = 0xF;
3912 break;
3913 #endif
3914 default:
3915 size = -1;
3916 break;
3919 return size;
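/* The BookE SIZE field grows the page by a factor of four per step:
 * page_size = 1 KiB << (2 * SIZE), so SIZE 0 is 1 KiB, 1 is 4 KiB and
 * 9 is 256 MiB; booke_page_size_to_tlb is the exact inverse and
 * returns -1 for sizes that have no encoding. */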
3922 /* Helpers for 4xx TLB management */
3923 target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3925 ppcemb_tlb_t *tlb;
3926 target_ulong ret;
3927 int size;
3929 entry &= 0x3F;
3930 tlb = &env->tlb[entry].tlbe;
3931 ret = tlb->EPN;
3932 if (tlb->prot & PAGE_VALID)
3933 ret |= 0x400;
3934 size = booke_page_size_to_tlb(tlb->size);
3935 if (size < 0 || size > 0x7)
3936 size = 1;
3937 ret |= size << 7;
3938 env->spr[SPR_40x_PID] = tlb->PID;
3939 return ret;
3942 target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3944 ppcemb_tlb_t *tlb;
3945 target_ulong ret;
3947 entry &= 0x3F;
3948 tlb = &env->tlb[entry].tlbe;
3949 ret = tlb->RPN;
3950 if (tlb->prot & PAGE_EXEC)
3951 ret |= 0x200;
3952 if (tlb->prot & PAGE_WRITE)
3953 ret |= 0x100;
3954 return ret;
3957 void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
3959 ppcemb_tlb_t *tlb;
3960 target_ulong page, end;
3962 LOG_SWTLB("%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
3963 entry &= 0x3F;
3964 tlb = &env->tlb[entry].tlbe;
3965 /* Invalidate previous TLB (if it's valid) */
3966 if (tlb->prot & PAGE_VALID) {
3967 end = tlb->EPN + tlb->size;
3968 LOG_SWTLB("%s: invalidate old TLB %d start " ADDRX
3969 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3970 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3971 tlb_flush_page(env, page);
3973 tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
3974 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
3975 * If this ever occurs, one should use the ppcemb target instead
3976 * of the ppc or ppc64 one
3978 if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
3979 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
3980 "are not supported (%d)\n",
3981 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
3983 tlb->EPN = val & ~(tlb->size - 1);
3984 if (val & 0x40)
3985 tlb->prot |= PAGE_VALID;
3986 else
3987 tlb->prot &= ~PAGE_VALID;
3988 if (val & 0x20) {
3989 /* XXX: TO BE FIXED */
3990 cpu_abort(env, "Little-endian TLB entries are not yet supported\n");
3992 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
3993 tlb->attr = val & 0xFF;
3994 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3995 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3996 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3997 tlb->prot & PAGE_READ ? 'r' : '-',
3998 tlb->prot & PAGE_WRITE ? 'w' : '-',
3999 tlb->prot & PAGE_EXEC ? 'x' : '-',
4000 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
4001 /* Invalidate new TLB (if valid) */
4002 if (tlb->prot & PAGE_VALID) {
4003 end = tlb->EPN + tlb->size;
4004 LOG_SWTLB("%s: invalidate TLB %d start " ADDRX
4005 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
4006 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
4007 tlb_flush_page(env, page);
4011 void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
4013 ppcemb_tlb_t *tlb;
4015 LOG_SWTLB("%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
4016 entry &= 0x3F;
4017 tlb = &env->tlb[entry].tlbe;
4018 tlb->RPN = val & 0xFFFFFC00;
4019 tlb->prot = PAGE_READ;
4020 if (val & 0x200)
4021 tlb->prot |= PAGE_EXEC;
4022 if (val & 0x100)
4023 tlb->prot |= PAGE_WRITE;
4024 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
4025 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
4026 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
4027 tlb->prot & PAGE_READ ? 'r' : '-',
4028 tlb->prot & PAGE_WRITE ? 'w' : '-',
4029 tlb->prot & PAGE_EXEC ? 'x' : '-',
4030 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
4033 target_ulong helper_4xx_tlbsx (target_ulong address)
4035 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
4038 /* PowerPC 440 TLB management */
4039 void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
4041 ppcemb_tlb_t *tlb;
4042 target_ulong EPN, RPN, size;
4043 int do_flush_tlbs;
4045 LOG_SWTLB("%s word %d entry %d value " ADDRX "\n",
4046 __func__, word, (int)entry, value);
4047 do_flush_tlbs = 0;
4048 entry &= 0x3F;
4049 tlb = &env->tlb[entry].tlbe;
4050 switch (word) {
4051 default:
4052 /* Just here to please gcc */
4053 case 0:
4054 EPN = value & 0xFFFFFC00;
4055 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
4056 do_flush_tlbs = 1;
4057 tlb->EPN = EPN;
4058 size = booke_tlb_to_page_size((value >> 4) & 0xF);
4059 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
4060 do_flush_tlbs = 1;
4061 tlb->size = size;
4062 tlb->attr &= ~0x1;
4063 tlb->attr |= (value >> 8) & 1;
4064 if (value & 0x200) {
4065 tlb->prot |= PAGE_VALID;
4066 } else {
4067 if (tlb->prot & PAGE_VALID) {
4068 tlb->prot &= ~PAGE_VALID;
4069 do_flush_tlbs = 1;
4072 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
4073 if (do_flush_tlbs)
4074 tlb_flush(env, 1);
4075 break;
4076 case 1:
4077 RPN = value & 0xFFFFFC0F;
4078 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
4079 tlb_flush(env, 1);
4080 tlb->RPN = RPN;
4081 break;
4082 case 2:
4083 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
4084 tlb->prot = tlb->prot & PAGE_VALID;
4085 if (value & 0x1)
4086 tlb->prot |= PAGE_READ << 4;
4087 if (value & 0x2)
4088 tlb->prot |= PAGE_WRITE << 4;
4089 if (value & 0x4)
4090 tlb->prot |= PAGE_EXEC << 4;
4091 if (value & 0x8)
4092 tlb->prot |= PAGE_READ;
4093 if (value & 0x10)
4094 tlb->prot |= PAGE_WRITE;
4095 if (value & 0x20)
4096 tlb->prot |= PAGE_EXEC;
4097 break;
4101 target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
4103 ppcemb_tlb_t *tlb;
4104 target_ulong ret;
4105 int size;
4107 entry &= 0x3F;
4108 tlb = &env->tlb[entry].tlbe;
4109 switch (word) {
4110 default:
4111 /* Just here to please gcc */
4112 case 0:
4113 ret = tlb->EPN;
4114 size = booke_page_size_to_tlb(tlb->size);
4115 if (size < 0 || size > 0xF)
4116 size = 1;
4117 ret |= size << 4;
4118 if (tlb->attr & 0x1)
4119 ret |= 0x100;
4120 if (tlb->prot & PAGE_VALID)
4121 ret |= 0x200;
4122 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
4123 env->spr[SPR_440_MMUCR] |= tlb->PID;
4124 break;
4125 case 1:
4126 ret = tlb->RPN;
4127 break;
4128 case 2:
4129 ret = tlb->attr & ~0x1;
4130 if (tlb->prot & (PAGE_READ << 4))
4131 ret |= 0x1;
4132 if (tlb->prot & (PAGE_WRITE << 4))
4133 ret |= 0x2;
4134 if (tlb->prot & (PAGE_EXEC << 4))
4135 ret |= 0x4;
4136 if (tlb->prot & PAGE_READ)
4137 ret |= 0x8;
4138 if (tlb->prot & PAGE_WRITE)
4139 ret |= 0x10;
4140 if (tlb->prot & PAGE_EXEC)
4141 ret |= 0x20;
4142 break;
4144 return ret;
4147 target_ulong helper_440_tlbsx (target_ulong address)
4149 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
4152 #endif /* !CONFIG_USER_ONLY */