qemu/target-alpha/op_helper.c
/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"

#include "op_helper.h"

#define MEMSUFFIX _raw
#include "op_helper_mem.h"

#if !defined(CONFIG_USER_ONLY)
#define MEMSUFFIX _kernel
#include "op_helper_mem.h"

#define MEMSUFFIX _executive
#include "op_helper_mem.h"

#define MEMSUFFIX _supervisor
#include "op_helper_mem.h"

#define MEMSUFFIX _user
#include "op_helper_mem.h"

/* This is used for pal modes */
#define MEMSUFFIX _data
#include "op_helper_mem.h"
#endif
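
/* Each inclusion of "op_helper_mem.h" above instantiates the memory access
   helpers for one access mode; MEMSUFFIX selects the suffix appended to the
   generated helper names (assumed from the usual QEMU template pattern). */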

void helper_tb_flush (void)
{
    tlb_flush(env, 1);
}

void cpu_dump_EA (target_ulong EA);
void helper_print_mem_EA (target_ulong EA)
{
    cpu_dump_EA(EA);
}

/*****************************************************************************/
/* Exceptions processing helpers */
void helper_excp (uint32_t excp, uint32_t error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

void helper_amask (void)
{
    switch (env->implver) {
    case IMPLVER_2106x:
        /* EV4, EV45, LCA, LCA45 & EV5 */
        break;
    case IMPLVER_21164:
    case IMPLVER_21264:
    case IMPLVER_21364:
        T0 &= ~env->amask;
        break;
    }
}

void helper_load_pcc (void)
{
    /* XXX: TODO */
    T0 = 0;
}

void helper_load_implver (void)
{
    T0 = env->implver;
}
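
/* FPCR handling below packs the softfloat state into the architectural
   FPCR layout: the dynamic rounding mode goes in bits <59:58> and the
   exception summary in bit <63>, matching the shifts used in the code. */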

void helper_load_fpcr (void)
{
    T0 = 0;
#ifdef CONFIG_SOFTFLOAT
    T0 |= (uint64_t)env->fp_status.float_exception_flags << 52;
    if (env->fp_status.float_exception_flags)
        T0 |= 1ULL << 63;
    env->ipr[IPR_EXC_SUM] &= ~0x3E;
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
#endif
    switch (env->fp_status.float_rounding_mode) {
    case float_round_nearest_even:
        T0 |= 2ULL << 58;
        break;
    case float_round_down:
        T0 |= 1ULL << 58;
        break;
    case float_round_up:
        T0 |= 3ULL << 58;
        break;
    case float_round_to_zero:
        break;
    }
}

void helper_store_fpcr (void)
{
#ifdef CONFIG_SOFTFLOAT
    set_float_exception_flags((T0 >> 52) & 0x3F, &FP_STATUS);
#endif
    switch ((T0 >> 58) & 3) {
    case 0:
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        break;
    case 1:
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        break;
    case 2:
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        break;
    case 3:
        set_float_rounding_mode(float_round_up, &FP_STATUS);
        break;
    }
}

void helper_load_irf (void)
{
    /* XXX: TODO */
    T0 = 0;
}

void helper_set_irf (void)
{
    /* XXX: TODO */
}

void helper_clear_irf (void)
{
    /* XXX: TODO */
}
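
/* Overflow detection for the ADDx/V and SUBx/V helpers below uses the usual
   sign-bit identity: addition overflows when both operands have the same
   sign and the result's sign differs; subtraction overflows when the
   operands have different signs and the result's sign differs from the
   minuend's. */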

void helper_addqv (void)
{
    T2 = T0;
    T0 += T1;
    if (unlikely((T2 ^ T1 ^ (-1ULL)) & (T2 ^ T0) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_addlv (void)
{
    T2 = T0;
    T0 = (uint32_t)(T0 + T1);
    if (unlikely((T2 ^ T1 ^ (-1UL)) & (T2 ^ T0) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_subqv (void)
{
    T2 = T0;
    T0 -= T1;
    /* Overflow when the operands differ in sign and the result's sign
       differs from the minuend's */
    if (unlikely((T2 ^ T1) & (T2 ^ T0) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_sublv (void)
{
    T2 = T0;
    T0 = (uint32_t)(T0 - T1);
    if (unlikely((T2 ^ T1) & (T2 ^ T0) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}

void helper_mullv (void)
{
    int64_t res = (int64_t)T0 * (int64_t)T1;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    T0 = (int64_t)((int32_t)res);
}

void helper_mulqv (void)
{
    uint64_t tl, th;

    muls64(&tl, &th, T0, T1);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    T0 = tl;
}

void helper_ctpop (void)
{
    T0 = ctpop64(T0);
}

void helper_ctlz (void)
{
    T0 = clz64(T0);
}

void helper_cttz (void)
{
    T0 = ctz64(T0);
}
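
/* byte_zap clears the bytes of op selected by the low eight bits of mskb:
   bit i set means byte i of the result is forced to zero.  The MSKxx, EXTxx,
   INSxx, ZAP and ZAPNOT helpers below are all built on top of it. */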

static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}

void helper_mskbl (void)
{
    T0 = byte_zap(T0, 0x01 << (T1 & 7));
}

void helper_extbl (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xFE);
}

void helper_insbl (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x01 << (T1 & 7)));
}

void helper_mskwl (void)
{
    T0 = byte_zap(T0, 0x03 << (T1 & 7));
}

void helper_extwl (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xFC);
}

void helper_inswl (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x03 << (T1 & 7)));
}

void helper_mskll (void)
{
    T0 = byte_zap(T0, 0x0F << (T1 & 7));
}

void helper_extll (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xF0);
}

void helper_insll (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x0F << (T1 & 7)));
}

void helper_zap (void)
{
    T0 = byte_zap(T0, T1);
}

void helper_zapnot (void)
{
    T0 = byte_zap(T0, ~T1);
}

void helper_mskql (void)
{
    T0 = byte_zap(T0, 0xFF << (T1 & 7));
}

void helper_extql (void)
{
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0x00);
}

void helper_insql (void)
{
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0xFF << (T1 & 7)));
}

void helper_mskwh (void)
{
    T0 = byte_zap(T0, (0x03 << (T1 & 7)) >> 8);
}

void helper_inswh (void)
{
    T0 >>= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~((0x03 << (T1 & 7)) >> 8));
}

void helper_extwh (void)
{
    T0 <<= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~0x07);
}

void helper_msklh (void)
{
    T0 = byte_zap(T0, (0x0F << (T1 & 7)) >> 8);
}

void helper_inslh (void)
{
    T0 >>= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~((0x0F << (T1 & 7)) >> 8));
}

void helper_extlh (void)
{
    T0 <<= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~0x0F);
}

void helper_mskqh (void)
{
    T0 = byte_zap(T0, (0xFF << (T1 & 7)) >> 8);
}

void helper_insqh (void)
{
    T0 >>= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, ~((0xFF << (T1 & 7)) >> 8));
}

void helper_extqh (void)
{
    T0 <<= 64 - ((T1 & 7) * 8);
    T0 = byte_zap(T0, 0x00);
}

void helper_cmpbge (void)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    /* Compare all eight byte lanes */
    for (i = 0; i < 8; i++) {
        opa = T0 >> (i * 8);
        opb = T1 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    T0 = res;
}

void helper_cmov_fir (int freg)
{
    if (FT0 != 0)
        env->fir[freg] = FT1;
}

void helper_sqrts (void)
{
    FT0 = float32_sqrt(FT0, &FP_STATUS);
}

void helper_cpys (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = p.i & 0x8000000000000000ULL;
    r.i |= q.i & ~0x8000000000000000ULL;
    FT0 = r.d;
}

void helper_cpysn (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = (~p.i) & 0x8000000000000000ULL;
    r.i |= q.i & ~0x8000000000000000ULL;
    FT0 = r.d;
}

void helper_cpyse (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = p.i & 0xFFF0000000000000ULL;
    r.i |= q.i & ~0xFFF0000000000000ULL;
    FT0 = r.d;
}

void helper_itofs (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float32(p.i, &FP_STATUS);
}

void helper_ftois (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float32_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}

void helper_sqrtt (void)
{
    FT0 = float64_sqrt(FT0, &FP_STATUS);
}
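
/* The IEEE compare helpers below write the Alpha "true" result
   0x4000000000000000 (i.e. 2.0) into FT0 when the relation holds,
   and 0 otherwise. */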

void helper_cmptun (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_is_nan(FT0) || float64_is_nan(FT1))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpteq (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_eq(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmptle (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_le(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmptlt (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_lt(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_itoft (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float64(p.i, &FP_STATUS);
}

void helper_ftoit (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float64_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}
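
/* VAX F- and G-format support: operands are converted to the nearest IEEE
   format, the operation is done with softfloat, and the result is converted
   back.  The conversions below only rescale the value by a factor of four
   (to account for the different exponent encoding) and map reserved
   operands, NaNs, infinities and overflows onto a VAX dirty zero. */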

static always_inline int vaxf_is_valid (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 0 && ((p.i & 0x80000000) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}

static always_inline float vaxf_to_ieee32 (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    if (exp < 3) {
        /* Underflow */
        p.f = 0.0;
    } else {
        p.f *= 0.25;
    }

    return p.f;
}

static always_inline float ieee32_to_vaxf (float fi)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = fi;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 255) {
        /* NaN or infinity */
        p.i = 1;
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.f *= 2.0;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            p.i = 1;
        } else {
            p.f *= 4.0;
        }
    }

    return p.f;
}

void helper_addf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_add(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_subf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_sub(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_mulf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_mul(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_divf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_div(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_sqrtf (void)
{
    float ft0, ft1;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = float32_sqrt(ft0, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft1);
}

void helper_itoff (void)
{
    /* XXX: TODO */
}

static always_inline int vaxg_is_valid (double ff)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint64_t exp, mant;

    p.f = ff;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
    if (exp == 0 && ((p.i & 0x8000000000000000ULL) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}

static always_inline double vaxg_to_ieee64 (double fg)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint32_t exp;

    p.f = fg;
    exp = (p.i >> 52) & 0x7FF;
    if (exp < 3) {
        /* Underflow */
        p.f = 0.0;
    } else {
        p.f *= 0.25;
    }

    return p.f;
}

static always_inline double ieee64_to_vaxg (double fi)
{
    union {
        double f;
        uint64_t i;
    } p;
    uint64_t mant;
    uint32_t exp;

    p.f = fi;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
    if (exp == 2047) {
        /* NaN or infinity */
        p.i = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.f *= 2.0;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            p.i = 1; /* VAX dirty zero */
        } else {
            p.f *= 4.0;
        }
    }

    return p.f;
}

void helper_addg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_add(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_subg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_sub(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_mulg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_mul(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_divg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_div(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_sqrtg (void)
{
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = float64_sqrt(ft0, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft1);
}

void helper_cmpgeq (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_eq(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpglt (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_lt(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpgle (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    p.u = 0;
    if (float64_le(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cvtqs (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = (float)p.u;
}

void helper_cvttq (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.u = FT0;
    FT0 = p.d;
}

void helper_cvtqt (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = p.u;
}

void helper_cvtqf (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = ieee32_to_vaxf(p.u);
}

void helper_cvtgf (void)
{
    double ft0;

    ft0 = vaxg_to_ieee64(FT0);
    FT0 = ieee32_to_vaxf(ft0);
}

void helper_cvtgd (void)
{
    /* XXX: TODO */
}

void helper_cvtgq (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.u = vaxg_to_ieee64(FT0);
    FT0 = p.d;
}

void helper_cvtqg (void)
{
    union {
        double d;
        uint64_t u;
    } p;

    p.d = FT0;
    FT0 = ieee64_to_vaxg(p.u);
}

void helper_cvtdg (void)
{
    /* XXX: TODO */
}
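
/* CVTLQ / CVTQL only shuffle bits between the packed 64-bit integer form
   and the split layout used when a longword is held in a floating-point
   register; no rounding or arithmetic is involved. */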

void helper_cvtlq (void)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = (p.u >> 29) & 0x3FFFFFFF;
    q.u |= (p.u >> 32);
    q.u = (int64_t)((int32_t)q.u);
    FT0 = q.d;
}

static always_inline void __helper_cvtql (int s, int v)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = ((uint64_t)(p.u & 0xC0000000)) << 32;
    q.u |= ((uint64_t)(p.u & 0x7FFFFFFF)) << 29;
    FT0 = q.d;
    if (v && (int64_t)((int32_t)p.u) != (int64_t)p.u) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
}

void helper_cvtql (void)
{
    __helper_cvtql(0, 0);
}

void helper_cvtqlv (void)
{
    __helper_cvtql(0, 1);
}

void helper_cvtqlsv (void)
{
    __helper_cvtql(1, 1);
}
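
/* Unlike the cmpt* helpers above, the cmpf* helpers return a plain 0 or 1
   in T0 instead of encoding the result as a floating-point value. */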

void helper_cmpfeq (void)
{
    if (float64_eq(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

void helper_cmpfne (void)
{
    if (float64_eq(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

void helper_cmpflt (void)
{
    if (float64_lt(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

void helper_cmpfle (void)
{
    if (float64_le(FT0, FT1, &FP_STATUS))
        T0 = 1;
    else
        T0 = 0;
}

void helper_cmpfgt (void)
{
    if (float64_le(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

void helper_cmpfge (void)
{
    if (float64_lt(FT0, FT1, &FP_STATUS))
        T0 = 0;
    else
        T0 = 1;
}

#if !defined (CONFIG_USER_ONLY)
void helper_mfpr (int iprn)
{
    uint64_t val;

    if (cpu_alpha_mfpr(env, iprn, &val) == 0)
        T0 = val;
}

void helper_mtpr (int iprn)
{
    cpu_alpha_mtpr(env, iprn, T0, NULL);
}
#endif

#if defined(HOST_SPARC) || defined(HOST_SPARC64)
void helper_reset_FT0 (void)
{
    FT0 = 0;
}

void helper_reset_FT1 (void)
{
    FT1 = 0;
}

void helper_reset_FT2 (void)
{
    FT2 = 0;
}
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif
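
/* GETPC() recovers the host return address inside the translated code so
   that tlb_fill() can locate the TB and restore the guest CPU state when an
   access faults.  The two helpers below probe the softmmu TLB directly: on
   a miss they call tlb_fill() and retry, then add the per-entry addend to
   obtain the address used for the access. */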

/* XXX: the two following helpers are pure hacks.
 *      Once we properly emulate the PALcode, we should never see
 *      HW_LD / HW_ST instructions.
 */
void helper_ld_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 0, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}

void helper_st_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 1, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}
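
/* Instantiate the softmmu accessors: SHIFT is the log2 of the access size,
   so the four inclusions below generate the byte, word, longword and
   quadword variants. */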

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Try to fill the TLB and raise an exception on error.  If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code.  It means that we
                   have a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}
#endif