Bug 497723 - forgot to restore callgrind output cleanup
[valgrind.git] / VEX / priv / guest_mips_helpers.c
blob79197378cc74933a34ee226e34892d40c7239ca2
2 /*---------------------------------------------------------------*/
3 /*--- begin guest_mips_helpers.c ---*/
4 /*---------------------------------------------------------------*/
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
10 Copyright (C) 2010-2017 RT-RK
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License as
14 published by the Free Software Foundation; either version 2 of the
15 License, or (at your option) any later version.
17 This program is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, see <http://www.gnu.org/licenses/>.
25 The GNU General Public License is contained in the file COPYING.
*/
28 #include "libvex_basictypes.h"
29 #include "libvex_emnote.h"
30 #include "libvex_guest_mips32.h"
31 #include "libvex_guest_mips64.h"
32 #include "libvex_ir.h"
33 #include "libvex.h"
35 #include "main_util.h"
36 #include "main_globals.h"
37 #include "guest_generic_bb_to_IR.h"
38 #include "guest_mips_defs.h"
/* Collapse the GCC version into one comparable number
   (major * 100 + minor), e.g. 408 for gcc 4.8.  Non-GCC
   compilers get 0. */
#if defined (__GNUC__)
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#else
#define GCC_VERSION 0
#endif
46 /* This file contains helper functions for mips guest code. Calls to
47 these functions are generated by the back end.
*/
/* Build an { offset, size } descriptor for one always-defined field
   of the MIPS32 guest state (for the .alwaysDefd tables). */
#define ALWAYSDEFD32(field) \
   { offsetof(VexGuestMIPS32State, field), \
     (sizeof ((VexGuestMIPS32State*)0)->field) }

/* The same, for the MIPS64 guest state. */
#define ALWAYSDEFD64(field) \
   { offsetof(VexGuestMIPS64State, field), \
     (sizeof ((VexGuestMIPS64State*)0)->field) }
58 IRExpr *guest_mips32_spechelper(const HChar * function_name, IRExpr ** args,
59 IRStmt ** precedingStmts, Int n_precedingStmts)
61 return NULL;
64 IRExpr *guest_mips64_spechelper ( const HChar * function_name, IRExpr ** args,
65 IRStmt ** precedingStmts,
66 Int n_precedingStmts )
68 return NULL;
71 /* VISIBLE TO LIBVEX CLIENT */
72 void LibVEX_GuestMIPS32_initialise( /*OUT*/ VexGuestMIPS32State * vex_state)
74 vex_state->guest_r0 = 0; /* Hardwired to 0 */
75 vex_state->guest_r1 = 0; /* Assembler temporary */
76 vex_state->guest_r2 = 0; /* Values for function returns ... */
77 vex_state->guest_r3 = 0; /* ...and expression evaluation */
78 vex_state->guest_r4 = 0; /* Function arguments */
79 vex_state->guest_r5 = 0;
80 vex_state->guest_r6 = 0;
81 vex_state->guest_r7 = 0;
82 vex_state->guest_r8 = 0; /* Temporaries */
83 vex_state->guest_r9 = 0;
84 vex_state->guest_r10 = 0;
85 vex_state->guest_r11 = 0;
86 vex_state->guest_r12 = 0;
87 vex_state->guest_r13 = 0;
88 vex_state->guest_r14 = 0;
89 vex_state->guest_r15 = 0;
90 vex_state->guest_r16 = 0; /* Saved temporaries */
91 vex_state->guest_r17 = 0;
92 vex_state->guest_r18 = 0;
93 vex_state->guest_r19 = 0;
94 vex_state->guest_r20 = 0;
95 vex_state->guest_r21 = 0;
96 vex_state->guest_r22 = 0;
97 vex_state->guest_r23 = 0;
98 vex_state->guest_r24 = 0; /* Temporaries */
99 vex_state->guest_r25 = 0;
100 vex_state->guest_r26 = 0; /* Reserved for OS kernel */
101 vex_state->guest_r27 = 0;
102 vex_state->guest_r28 = 0; /* Global pointer */
103 vex_state->guest_r29 = 0; /* Stack pointer */
104 vex_state->guest_r30 = 0; /* Frame pointer */
105 vex_state->guest_r31 = 0; /* Return address */
106 vex_state->guest_PC = 0; /* Program counter */
107 vex_state->guest_HI = 0; /* Multiply and divide register higher result */
108 vex_state->guest_LO = 0; /* Multiply and divide register lower result */
110 /* FPU Registers */
111 vex_state->guest_f0 = 0x7ff800007ff80000ULL; /* Floting point GP registers */
112 vex_state->guest_f1 = 0x7ff800007ff80000ULL;
113 vex_state->guest_f2 = 0x7ff800007ff80000ULL;
114 vex_state->guest_f3 = 0x7ff800007ff80000ULL;
115 vex_state->guest_f4 = 0x7ff800007ff80000ULL;
116 vex_state->guest_f5 = 0x7ff800007ff80000ULL;
117 vex_state->guest_f6 = 0x7ff800007ff80000ULL;
118 vex_state->guest_f7 = 0x7ff800007ff80000ULL;
119 vex_state->guest_f8 = 0x7ff800007ff80000ULL;
120 vex_state->guest_f9 = 0x7ff800007ff80000ULL;
121 vex_state->guest_f10 = 0x7ff800007ff80000ULL;
122 vex_state->guest_f11 = 0x7ff800007ff80000ULL;
123 vex_state->guest_f12 = 0x7ff800007ff80000ULL;
124 vex_state->guest_f13 = 0x7ff800007ff80000ULL;
125 vex_state->guest_f14 = 0x7ff800007ff80000ULL;
126 vex_state->guest_f15 = 0x7ff800007ff80000ULL;
127 vex_state->guest_f16 = 0x7ff800007ff80000ULL;
128 vex_state->guest_f17 = 0x7ff800007ff80000ULL;
129 vex_state->guest_f18 = 0x7ff800007ff80000ULL;
130 vex_state->guest_f19 = 0x7ff800007ff80000ULL;
131 vex_state->guest_f20 = 0x7ff800007ff80000ULL;
132 vex_state->guest_f21 = 0x7ff800007ff80000ULL;
133 vex_state->guest_f22 = 0x7ff800007ff80000ULL;
134 vex_state->guest_f23 = 0x7ff800007ff80000ULL;
135 vex_state->guest_f24 = 0x7ff800007ff80000ULL;
136 vex_state->guest_f25 = 0x7ff800007ff80000ULL;
137 vex_state->guest_f26 = 0x7ff800007ff80000ULL;
138 vex_state->guest_f27 = 0x7ff800007ff80000ULL;
139 vex_state->guest_f28 = 0x7ff800007ff80000ULL;
140 vex_state->guest_f29 = 0x7ff800007ff80000ULL;
141 vex_state->guest_f30 = 0x7ff800007ff80000ULL;
142 vex_state->guest_f31 = 0x7ff800007ff80000ULL;
144 vex_state->guest_FIR = 0; /* FP implementation and revision register */
145 vex_state->guest_FCCR = 0; /* FP condition codes register */
146 vex_state->guest_FEXR = 0; /* FP exceptions register */
147 vex_state->guest_FENR = 0; /* FP enables register */
148 vex_state->guest_FCSR = 0; /* FP control/status register */
149 vex_state->guest_ULR = 0; /* TLS */
151 /* Various pseudo-regs mandated by Vex or Valgrind. */
152 /* Emulation notes */
153 vex_state->guest_EMNOTE = 0;
155 /* For clflush: record start and length of area to invalidate */
156 vex_state->guest_CMSTART = 0;
157 vex_state->guest_CMLEN = 0;
158 vex_state->host_EvC_COUNTER = 0;
159 vex_state->host_EvC_FAILADDR = 0;
161 /* Used to record the unredirected guest address at the start of
162 a translation whose start has been redirected. By reading
163 this pseudo-register shortly afterwards, the translation can
164 find out what the corresponding no-redirection address was.
165 Note, this is only set for wrap-style redirects, not for
166 replace-style ones. */
167 vex_state->guest_NRADDR = 0;
169 vex_state->guest_COND = 0;
171 vex_state->guest_CP0_status = 0;
172 vex_state->guest_CP0_Config5 = 0;
174 vex_state->guest_LLaddr = 0xFFFFFFFF;
175 vex_state->guest_LLdata = 0;
177 /* MIPS32 DSP ASE(r2) specific registers */
178 vex_state->guest_DSPControl = 0; /* DSPControl register */
179 vex_state->guest_ac0 = 0; /* Accumulator 0 */
180 vex_state->guest_ac1 = 0; /* Accumulator 1 */
181 vex_state->guest_ac2 = 0; /* Accumulator 2 */
182 vex_state->guest_ac3 = 0; /* Accumulator 3 */
184 vex_state->guest_w0.w64[0] = 0;
185 vex_state->guest_w0.w64[1] = 0;
186 vex_state->guest_w1.w64[0] = 0;
187 vex_state->guest_w1.w64[1] = 0;
188 vex_state->guest_w2.w64[0] = 0;
189 vex_state->guest_w2.w64[1] = 0;
191 vex_state->guest_IP_AT_SYSCALL = 0;
194 void LibVEX_GuestMIPS64_initialise ( /*OUT*/ VexGuestMIPS64State * vex_state )
196 vex_state->guest_r0 = 0; /* Hardwired to 0 */
197 vex_state->guest_r1 = 0; /* Assembler temporary */
198 vex_state->guest_r2 = 0; /* Values for function returns ... */
199 vex_state->guest_r3 = 0;
200 vex_state->guest_r4 = 0; /* Function arguments */
201 vex_state->guest_r5 = 0;
202 vex_state->guest_r6 = 0;
203 vex_state->guest_r7 = 0;
204 vex_state->guest_r8 = 0;
205 vex_state->guest_r9 = 0;
206 vex_state->guest_r10 = 0;
207 vex_state->guest_r11 = 0;
208 vex_state->guest_r12 = 0; /* Temporaries */
209 vex_state->guest_r13 = 0;
210 vex_state->guest_r14 = 0;
211 vex_state->guest_r15 = 0;
212 vex_state->guest_r16 = 0; /* Saved temporaries */
213 vex_state->guest_r17 = 0;
214 vex_state->guest_r18 = 0;
215 vex_state->guest_r19 = 0;
216 vex_state->guest_r20 = 0;
217 vex_state->guest_r21 = 0;
218 vex_state->guest_r22 = 0;
219 vex_state->guest_r23 = 0;
220 vex_state->guest_r24 = 0; /* Temporaries */
221 vex_state->guest_r25 = 0;
222 vex_state->guest_r26 = 0; /* Reserved for OS kernel */
223 vex_state->guest_r27 = 0;
224 vex_state->guest_r28 = 0; /* Global pointer */
225 vex_state->guest_r29 = 0; /* Stack pointer */
226 vex_state->guest_r30 = 0; /* Frame pointer */
227 vex_state->guest_r31 = 0; /* Return address */
228 vex_state->guest_PC = 0; /* Program counter */
229 vex_state->guest_HI = 0; /* Multiply and divide register higher result */
230 vex_state->guest_LO = 0; /* Multiply and divide register lower result */
232 /* FPU Registers */
233 vex_state->guest_f0 = 0x7ff800007ff80000ULL; /* Floting point registers */
234 vex_state->guest_f1 = 0x7ff800007ff80000ULL;
235 vex_state->guest_f2 = 0x7ff800007ff80000ULL;
236 vex_state->guest_f3 = 0x7ff800007ff80000ULL;
237 vex_state->guest_f4 = 0x7ff800007ff80000ULL;
238 vex_state->guest_f5 = 0x7ff800007ff80000ULL;
239 vex_state->guest_f6 = 0x7ff800007ff80000ULL;
240 vex_state->guest_f7 = 0x7ff800007ff80000ULL;
241 vex_state->guest_f8 = 0x7ff800007ff80000ULL;
242 vex_state->guest_f9 = 0x7ff800007ff80000ULL;
243 vex_state->guest_f10 = 0x7ff800007ff80000ULL;
244 vex_state->guest_f11 = 0x7ff800007ff80000ULL;
245 vex_state->guest_f12 = 0x7ff800007ff80000ULL;
246 vex_state->guest_f13 = 0x7ff800007ff80000ULL;
247 vex_state->guest_f14 = 0x7ff800007ff80000ULL;
248 vex_state->guest_f15 = 0x7ff800007ff80000ULL;
249 vex_state->guest_f16 = 0x7ff800007ff80000ULL;
250 vex_state->guest_f17 = 0x7ff800007ff80000ULL;
251 vex_state->guest_f18 = 0x7ff800007ff80000ULL;
252 vex_state->guest_f19 = 0x7ff800007ff80000ULL;
253 vex_state->guest_f20 = 0x7ff800007ff80000ULL;
254 vex_state->guest_f21 = 0x7ff800007ff80000ULL;
255 vex_state->guest_f22 = 0x7ff800007ff80000ULL;
256 vex_state->guest_f23 = 0x7ff800007ff80000ULL;
257 vex_state->guest_f24 = 0x7ff800007ff80000ULL;
258 vex_state->guest_f25 = 0x7ff800007ff80000ULL;
259 vex_state->guest_f26 = 0x7ff800007ff80000ULL;
260 vex_state->guest_f27 = 0x7ff800007ff80000ULL;
261 vex_state->guest_f28 = 0x7ff800007ff80000ULL;
262 vex_state->guest_f29 = 0x7ff800007ff80000ULL;
263 vex_state->guest_f30 = 0x7ff800007ff80000ULL;
264 vex_state->guest_f31 = 0x7ff800007ff80000ULL;
266 vex_state->guest_FIR = 0; /* FP implementation and revision register */
267 vex_state->guest_FCCR = 0; /* FP condition codes register */
268 vex_state->guest_FEXR = 0; /* FP exceptions register */
269 vex_state->guest_FENR = 0; /* FP enables register */
270 vex_state->guest_FCSR = 0; /* FP control/status register */
272 vex_state->guest_ULR = 0;
274 /* Various pseudo-regs mandated by Vex or Valgrind. */
275 /* Emulation notes */
276 vex_state->guest_EMNOTE = 0;
278 /* For clflush: record start and length of area to invalidate */
279 vex_state->guest_CMSTART = 0;
280 vex_state->guest_CMLEN = 0;
281 vex_state->host_EvC_COUNTER = 0;
282 vex_state->host_EvC_FAILADDR = 0;
284 /* Used to record the unredirected guest address at the start of
285 a translation whose start has been redirected. By reading
286 this pseudo-register shortly afterwards, the translation can
287 find out what the corresponding no-redirection address was.
288 Note, this is only set for wrap-style redirects, not for
289 replace-style ones. */
290 vex_state->guest_NRADDR = 0;
292 vex_state->guest_COND = 0;
294 vex_state->guest_CP0_status = MIPS_CP0_STATUS_FR;
296 vex_state->guest_LLaddr = 0xFFFFFFFFFFFFFFFFULL;
297 vex_state->guest_LLdata = 0;
299 vex_state->guest_IP_AT_SYSCALL = 0;
301 vex_state->guest_MSACSR = 0;
304 /*-----------------------------------------------------------*/
305 /*--- Describing the mips guest state, for the benefit ---*/
306 /*--- of iropt and instrumenters. ---*/
307 /*-----------------------------------------------------------*/
309 /* Figure out if any part of the guest state contained in minoff
310 .. maxoff requires precise memory exceptions. If in doubt return
311 True (but this generates significantly slower code).
313 We enforce precise exns for guest SP, PC.
315 Only SP is needed in mode VexRegUpdSpAtMemAccess.
317 Bool guest_mips32_state_requires_precise_mem_exns (
318 Int minoff, Int maxoff, VexRegisterUpdates pxControl
321 Int sp_min = offsetof(VexGuestMIPS32State, guest_r29);
322 Int sp_max = sp_min + 4 - 1;
323 Int pc_min = offsetof(VexGuestMIPS32State, guest_PC);
324 Int pc_max = pc_min + 4 - 1;
326 if (maxoff < sp_min || minoff > sp_max) {
327 /* no overlap with sp */
328 if (pxControl == VexRegUpdSpAtMemAccess)
329 return False; /* We only need to check stack pointer. */
330 } else {
331 return True;
334 if (maxoff < pc_min || minoff > pc_max) {
335 /* no overlap with pc */
336 } else {
337 return True;
340 /* We appear to need precise updates of R11 in order to get proper
341 stacktraces from non-optimised code. */
342 Int fp_min = offsetof(VexGuestMIPS32State, guest_r30);
343 Int fp_max = fp_min + 4 - 1;
345 if (maxoff < fp_min || minoff > fp_max) {
346 /* no overlap with fp */
347 } else {
348 return True;
351 return False;
354 Bool guest_mips64_state_requires_precise_mem_exns (
355 Int minoff, Int maxoff, VexRegisterUpdates pxControl
358 Int sp_min = offsetof(VexGuestMIPS64State, guest_r29);
359 Int sp_max = sp_min + 8 - 1;
360 Int pc_min = offsetof(VexGuestMIPS64State, guest_PC);
361 Int pc_max = pc_min + 8 - 1;
363 if ( maxoff < sp_min || minoff > sp_max ) {
364 /* no overlap with sp */
365 if (pxControl == VexRegUpdSpAtMemAccess)
366 return False; /* We only need to check stack pointer. */
367 } else {
368 return True;
371 if ( maxoff < pc_min || minoff > pc_max ) {
372 /* no overlap with pc */
373 } else {
374 return True;
377 Int fp_min = offsetof(VexGuestMIPS64State, guest_r30);
378 Int fp_max = fp_min + 8 - 1;
380 if ( maxoff < fp_min || minoff > fp_max ) {
381 /* no overlap with fp */
382 } else {
383 return True;
386 return False;
389 VexGuestLayout mips32Guest_layout = {
390 /* Total size of the guest state, in bytes. */
391 .total_sizeB = sizeof(VexGuestMIPS32State),
392 /* Describe the stack pointer. */
393 .offset_SP = offsetof(VexGuestMIPS32State, guest_r29),
394 .sizeof_SP = 4,
395 /* Describe the frame pointer. */
396 .offset_FP = offsetof(VexGuestMIPS32State, guest_r30),
397 .sizeof_FP = 4,
398 /* Describe the instruction pointer. */
399 .offset_IP = offsetof(VexGuestMIPS32State, guest_PC),
400 .sizeof_IP = 4,
401 /* Describe any sections to be regarded by Memcheck as
402 'always-defined'. */
403 .n_alwaysDefd = 8,
404 /* ? :( */
405 .alwaysDefd = {
406 /* 0 */ ALWAYSDEFD32(guest_r0),
407 /* 1 */ ALWAYSDEFD32(guest_r1),
408 /* 2 */ ALWAYSDEFD32(guest_EMNOTE),
409 /* 3 */ ALWAYSDEFD32(guest_CMSTART),
410 /* 4 */ ALWAYSDEFD32(guest_CMLEN),
411 /* 5 */ ALWAYSDEFD32(guest_r29),
412 /* 6 */ ALWAYSDEFD32(guest_r31),
413 /* 7 */ ALWAYSDEFD32(guest_ULR)
417 VexGuestLayout mips64Guest_layout = {
418 /* Total size of the guest state, in bytes. */
419 .total_sizeB = sizeof(VexGuestMIPS64State),
420 /* Describe the stack pointer. */
421 .offset_SP = offsetof(VexGuestMIPS64State, guest_r29),
422 .sizeof_SP = 8,
423 /* Describe the frame pointer. */
424 .offset_FP = offsetof(VexGuestMIPS64State, guest_r30),
425 .sizeof_FP = 8,
426 /* Describe the instruction pointer. */
427 .offset_IP = offsetof(VexGuestMIPS64State, guest_PC),
428 .sizeof_IP = 8,
429 /* Describe any sections to be regarded by Memcheck as
430 'always-defined'. */
431 .n_alwaysDefd = 7,
432 /* ? :( */
433 .alwaysDefd = {
434 /* 0 */ ALWAYSDEFD64 (guest_r0),
435 /* 1 */ ALWAYSDEFD64 (guest_EMNOTE),
436 /* 2 */ ALWAYSDEFD64 (guest_CMSTART),
437 /* 3 */ ALWAYSDEFD64 (guest_CMLEN),
438 /* 4 */ ALWAYSDEFD64 (guest_r29),
439 /* 5 */ ALWAYSDEFD64 (guest_r31),
440 /* 6 */ ALWAYSDEFD64 (guest_ULR)
444 #define ASM_VOLATILE_RDHWR(opcode) \
445 __asm__ __volatile__(".word 0x7C02003B | "#opcode" << 11 \n\t" \
446 : "+r" (x) : : \
449 HWord mips_dirtyhelper_rdhwr ( UInt rd )
451 #if defined(__mips__)
452 register HWord x __asm__("v0") = 0;
454 switch (rd) {
455 case 0: /* x = CPUNum() */
456 ASM_VOLATILE_RDHWR(0); /* rdhwr v0, $0 */
457 break;
459 case 1: /* x = SYNCI_Step() */
460 ASM_VOLATILE_RDHWR(1); /* rdhwr v0, $1 */
461 break;
463 case 2: /* x = CC() */
464 ASM_VOLATILE_RDHWR(2); /* rdhwr v0, $2 */
465 break;
467 case 3: /* x = CCRes() */
468 ASM_VOLATILE_RDHWR(3); /* rdhwr v0, $3 */
469 break;
471 case 31: /* x = CVMX_get_cycles() */
472 ASM_VOLATILE_RDHWR(31); /* rdhwr v0, $31 */
473 break;
475 default:
476 vassert(0);
477 break;
479 return x;
480 #else
481 return 0;
482 #endif
/* The macros below run one FPU/MSA instruction natively in order to
   observe the FCSR/MSACSR value it produces.  Common pattern: save
   the host control register, install the guest's value, load the
   operand(s), run 'inst', read the resulting control register into
   'ret', and restore the host's value.  Each macro expects specific
   locals in the caller's scope (noted per macro). */

/* Single-precision unary op via $f20.  Needs: ret, loFsVal, fcsr. */
#define ASM_VOLATILE_UNARY32(inst)                     \
   __asm__ volatile(".set push"        "\n\t"          \
                    ".set hardfloat"   "\n\t"          \
                    "cfc1 $8, $31"     "\n\t"          \
                    "ctc1 %2, $31"     "\n\t"          \
                    "mtc1 %1, $f20"    "\n\t"          \
                    #inst" $f20, $f20" "\n\t"          \
                    "cfc1 %0, $31"     "\n\t"          \
                    "ctc1 $8, $31"     "\n\t"          \
                    ".set pop"         "\n\t"          \
                    : "=r" (ret)                       \
                    : "r" (loFsVal), "r" (fcsr)        \
                    : "$8", "$f20"                     \
                   );

/* Double-precision unary op via $f20/$f21 (paired-register mode).
   Needs: ret, fsVal, fcsr. */
#define ASM_VOLATILE_UNARY32_DOUBLE(inst)              \
   __asm__ volatile(".set push"        "\n\t"          \
                    ".set hardfloat"   "\n\t"          \
                    "cfc1 $8, $31"     "\n\t"          \
                    "ctc1 %2, $31"     "\n\t"          \
                    "ldc1 $f20, 0(%1)" "\n\t"          \
                    #inst" $f20, $f20" "\n\t"          \
                    "cfc1 %0, $31"     "\n\t"          \
                    "ctc1 $8, $31"     "\n\t"          \
                    ".set pop"         "\n\t"          \
                    : "=r" (ret)                       \
                    : "r" (&fsVal), "r" (fcsr)         \
                    : "$8", "$f20", "$f21"             \
                   );

/* Unary op in the 64-bit FPU register model via $f24.
   Needs: ret, addr, fs, fcsr. */
#define ASM_VOLATILE_UNARY64(inst)                     \
   __asm__ volatile(".set push"        "\n\t"          \
                    ".set hardfloat"   "\n\t"          \
                    ".set fp=64"       "\n\t"          \
                    "cfc1 $8, $31"     "\n\t"          \
                    "ctc1 %2, $31"     "\n\t"          \
                    "ldc1 $f24, 0(%1)" "\n\t"          \
                    #inst" $f24, $f24" "\n\t"          \
                    "cfc1 %0, $31"     "\n\t"          \
                    "ctc1 $8, $31"     "\n\t"          \
                    ".set pop"         "\n\t"          \
                    : "=r" (ret)                       \
                    : "r" (&(addr[fs])), "r" (fcsr)    \
                    : "$8", "$f24"                     \
                   );

/* MSA unary op via $w24; control register is MSACSR ($1 in the MSA
   control register file).  Needs: ret, addr, ws, msacsr. */
#define ASM_VOLATILE_MSA_UNARY(inst)                   \
   __asm__ volatile(".set push"        "\n\t"          \
                    ".set mips32r2"    "\n\t"          \
                    ".set hardfloat"   "\n\t"          \
                    ".set fp=64"       "\n\t"          \
                    ".set msa"         "\n\t"          \
                    ".set noreorder"   "\n\t"          \
                    "cfcmsa $t0, $1"   "\n\t"          \
                    "ctcmsa $1, %2"    "\n\t"          \
                    "ld.b $w24, 0(%1)" "\n\t"          \
                    #inst" $w24, $w24" "\n\t"          \
                    "cfcmsa %0, $1"    "\n\t"          \
                    "ctcmsa $1, $t0"   "\n\t"          \
                    ".set pop"         "\n\t"          \
                    : "=r" (ret)                       \
                    : "r" (&(addr[ws])), "r" (msacsr)  \
                    : "t0"                             \
                   );

/* Single-precision binary op via $f20/$f22.
   Needs: ret, loFsVal, loFtVal, fcsr. */
#define ASM_VOLATILE_BINARY32(inst)                          \
   __asm__ volatile(".set push"              "\n\t"          \
                    ".set hardfloat"         "\n\t"          \
                    "cfc1 $8, $31"           "\n\t"          \
                    "ctc1 %3, $31"           "\n\t"          \
                    "mtc1 %1, $f20"          "\n\t"          \
                    "mtc1 %2, $f22"          "\n\t"          \
                    #inst" $f20, $f20, $f22" "\n\t"          \
                    "cfc1 %0, $31"           "\n\t"          \
                    "ctc1 $8, $31"           "\n\t"          \
                    ".set pop"               "\n\t"          \
                    : "=r" (ret)                             \
                    : "r" (loFsVal), "r" (loFtVal), "r" (fcsr) \
                    : "$8", "$f20", "$f22"                   \
                   );

/* Double-precision binary op via paired $f20/$f21 and $f22/$f23.
   Needs: ret, fsVal, ftVal, fcsr. */
#define ASM_VOLATILE_BINARY32_DOUBLE(inst)                   \
   __asm__ volatile(".set push"              "\n\t"          \
                    ".set hardfloat"         "\n\t"          \
                    "cfc1 $8, $31"           "\n\t"          \
                    "ctc1 %3, $31"           "\n\t"          \
                    "ldc1 $f20, 0(%1)"       "\n\t"          \
                    "ldc1 $f22, 0(%2)"       "\n\t"          \
                    #inst" $f20, $f20, $f22" "\n\t"          \
                    "cfc1 %0, $31"           "\n\t"          \
                    "ctc1 $8, $31"           "\n\t"          \
                    ".set pop"               "\n\t"          \
                    : "=r" (ret)                             \
                    : "r" (&fsVal), "r" (&ftVal), "r" (fcsr) \
                    : "$8", "$f20", "$f21", "$f22", "$f23"   \
                   );

/* Binary op in the 64-bit FPU register model via $f24/$f26.
   Needs: ret, addr, fs, ft, fcsr. */
#define ASM_VOLATILE_BINARY64(inst)                          \
   __asm__ volatile(".set push"              "\n\t"          \
                    ".set hardfloat"         "\n\t"          \
                    "cfc1 $8, $31"           "\n\t"          \
                    "ctc1 %3, $31"           "\n\t"          \
                    "ldc1 $f24, 0(%1)"       "\n\t"          \
                    "ldc1 $f26, 0(%2)"       "\n\t"          \
                    #inst" $f24, $f24, $f26" "\n\t"          \
                    "cfc1 %0, $31"           "\n\t"          \
                    "ctc1 $8, $31"           "\n\t"          \
                    ".set pop"               "\n\t"          \
                    : "=r" (ret)                             \
                    : "r" (&(addr[fs])), "r" (&(addr[ft])), "r" (fcsr) \
                    : "$8", "$f24", "$f26"                   \
                   );

/* MSA binary op via $w24/$w26.  Needs: ret, addr, ws, wt, msacsr. */
#define ASM_VOLATILE_MSA_BINARY(inst)                        \
   __asm__ volatile(".set push"              "\n\t"          \
                    ".set mips32r2"          "\n\t"          \
                    ".set hardfloat"         "\n\t"          \
                    ".set fp=64"             "\n\t"          \
                    ".set msa"               "\n\t"          \
                    "cfcmsa $t0, $1"         "\n\t"          \
                    "ctcmsa $1, %3"          "\n\t"          \
                    "ld.b $w24, 0(%1)"       "\n\t"          \
                    "ld.b $w26, 0(%2)"       "\n\t"          \
                    #inst" $w24, $w24, $w26" "\n\t"          \
                    "cfcmsa %0, $1"          "\n\t"          \
                    "ctcmsa $1, $t0"         "\n\t"          \
                    ".set pop"               "\n\t"          \
                    : "=r" (ret)                             \
                    : "r" (&(addr[ws])), "r" (&(addr[wt])), "r" (msacsr) \
                    : "t0"                                   \
                   );
617 /* TODO: Add cases for all fpu instructions because all fpu instructions are
618 change the value of FCSR register. */
619 extern UInt mips_dirtyhelper_calculate_FCSR_fp32 ( void* gs, UInt fs, UInt ft,
620 flt_op inst )
622 UInt ret = 0;
623 #if defined(__mips__)
624 VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
625 UInt loFsVal, hiFsVal, loFtVal, hiFtVal;
626 #if defined (_MIPSEL)
627 ULong *addr = (ULong *)&guest_state->guest_f0;
628 loFsVal = (UInt)addr[fs];
629 hiFsVal = (UInt)addr[fs+1];
630 loFtVal = (UInt)addr[ft];
631 hiFtVal = (UInt)addr[ft+1];
632 #elif defined (_MIPSEB)
633 UInt *addr = (UInt *)&guest_state->guest_f0;
634 loFsVal = (UInt)addr[fs*2];
635 hiFsVal = (UInt)addr[fs*2+2];
636 loFtVal = (UInt)addr[ft*2];
637 hiFtVal = (UInt)addr[ft*2+2];
638 #endif
639 ULong fsVal = ((ULong) hiFsVal) << 32 | loFsVal;
640 ULong ftVal = ((ULong) hiFtVal) << 32 | loFtVal;
641 UInt fcsr = guest_state->guest_FCSR;
642 switch (inst) {
643 case ROUNDWD:
644 ASM_VOLATILE_UNARY32_DOUBLE(round.w.d)
645 break;
646 case FLOORWS:
647 ASM_VOLATILE_UNARY32(floor.w.s)
648 break;
649 case FLOORWD:
650 ASM_VOLATILE_UNARY32_DOUBLE(floor.w.d)
651 break;
652 case TRUNCWS:
653 ASM_VOLATILE_UNARY32(trunc.w.s)
654 break;
655 case TRUNCWD:
656 ASM_VOLATILE_UNARY32_DOUBLE(trunc.w.d)
657 break;
658 case CEILWS:
659 ASM_VOLATILE_UNARY32(ceil.w.s)
660 break;
661 case CEILWD:
662 ASM_VOLATILE_UNARY32_DOUBLE(ceil.w.d)
663 break;
664 case CVTDS:
665 ASM_VOLATILE_UNARY32(cvt.d.s)
666 break;
667 case CVTDW:
668 ASM_VOLATILE_UNARY32(cvt.d.w)
669 break;
670 case CVTSW:
671 ASM_VOLATILE_UNARY32(cvt.s.w)
672 break;
673 case CVTSD:
674 ASM_VOLATILE_UNARY32_DOUBLE(cvt.s.d)
675 break;
676 case CVTWS:
677 ASM_VOLATILE_UNARY32(cvt.w.s)
678 break;
679 case CVTWD:
680 ASM_VOLATILE_UNARY32_DOUBLE(cvt.w.d)
681 break;
682 case ROUNDWS:
683 ASM_VOLATILE_UNARY32(round.w.s)
684 break;
685 case ADDS:
686 ASM_VOLATILE_BINARY32(add.s)
687 break;
688 case ADDD:
689 ASM_VOLATILE_BINARY32_DOUBLE(add.d)
690 break;
691 case SUBS:
692 ASM_VOLATILE_BINARY32(sub.s)
693 break;
694 case SUBD:
695 ASM_VOLATILE_BINARY32_DOUBLE(sub.d)
696 break;
697 case DIVS:
698 ASM_VOLATILE_BINARY32(div.s)
699 break;
700 default:
701 vassert(0);
702 break;
704 #endif
705 return ret;
708 /* TODO: Add cases for all fpu instructions because all fpu instructions are
709 change the value of FCSR register. */
710 extern UInt mips_dirtyhelper_calculate_FCSR_fp64 ( void* gs, UInt fs, UInt ft,
711 flt_op inst )
713 UInt ret = 0;
714 #if defined(__mips__) && ((__mips == 64) || \
715 (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)))
716 #if defined(VGA_mips32)
717 VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
718 #else
719 VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
720 #endif
721 ULong *addr = (ULong *)&guest_state->guest_f0;
722 UInt fcsr = guest_state->guest_FCSR;
723 switch (inst) {
724 case ROUNDWD:
725 ASM_VOLATILE_UNARY64(round.w.d)
726 break;
727 case FLOORWS:
728 ASM_VOLATILE_UNARY64(floor.w.s)
729 break;
730 case FLOORWD:
731 ASM_VOLATILE_UNARY64(floor.w.d)
732 break;
733 case TRUNCWS:
734 ASM_VOLATILE_UNARY64(trunc.w.s)
735 break;
736 case TRUNCWD:
737 ASM_VOLATILE_UNARY64(trunc.w.d)
738 break;
739 case CEILWS:
740 ASM_VOLATILE_UNARY64(ceil.w.s)
741 break;
742 case CEILWD:
743 ASM_VOLATILE_UNARY64(ceil.w.d)
744 break;
745 case CVTDS:
746 ASM_VOLATILE_UNARY64(cvt.d.s)
747 break;
748 case CVTDW:
749 ASM_VOLATILE_UNARY64(cvt.d.w)
750 break;
751 case CVTSW:
752 ASM_VOLATILE_UNARY64(cvt.s.w)
753 break;
754 case CVTSD:
755 ASM_VOLATILE_UNARY64(cvt.s.d)
756 break;
757 case CVTWS:
758 ASM_VOLATILE_UNARY64(cvt.w.s)
759 break;
760 case CVTWD:
761 ASM_VOLATILE_UNARY64(cvt.w.d)
762 break;
763 case ROUNDWS:
764 ASM_VOLATILE_UNARY64(round.w.s)
765 break;
766 case CEILLS:
767 ASM_VOLATILE_UNARY64(ceil.l.s)
768 break;
769 case CEILLD:
770 ASM_VOLATILE_UNARY64(ceil.l.d)
771 break;
772 case CVTDL:
773 ASM_VOLATILE_UNARY64(cvt.d.l)
774 break;
775 case CVTLS:
776 ASM_VOLATILE_UNARY64(cvt.l.s)
777 break;
778 case CVTLD:
779 ASM_VOLATILE_UNARY64(cvt.l.d)
780 break;
781 case CVTSL:
782 ASM_VOLATILE_UNARY64(cvt.s.l)
783 break;
784 case FLOORLS:
785 ASM_VOLATILE_UNARY64(floor.l.s)
786 break;
787 case FLOORLD:
788 ASM_VOLATILE_UNARY64(floor.l.d)
789 break;
790 case ROUNDLS:
791 ASM_VOLATILE_UNARY64(round.l.s)
792 break;
793 case ROUNDLD:
794 ASM_VOLATILE_UNARY64(round.l.d)
795 break;
796 case TRUNCLS:
797 ASM_VOLATILE_UNARY64(trunc.l.s)
798 break;
799 case TRUNCLD:
800 ASM_VOLATILE_UNARY64(trunc.l.d)
801 break;
802 case ADDS:
803 ASM_VOLATILE_BINARY64(add.s)
804 break;
805 case ADDD:
806 ASM_VOLATILE_BINARY64(add.d)
807 break;
808 case SUBS:
809 ASM_VOLATILE_BINARY64(sub.s)
810 break;
811 case SUBD:
812 ASM_VOLATILE_BINARY64(sub.d)
813 break;
814 case DIVS:
815 ASM_VOLATILE_BINARY64(div.s)
816 break;
817 #if defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
818 case RINTS:
819 ASM_VOLATILE_UNARY64(rint.s)
820 break;
821 case RINTD:
822 ASM_VOLATILE_UNARY64(rint.d)
823 break;
824 case MAXS:
825 ASM_VOLATILE_BINARY64(max.s)
826 break;
827 case MAXD:
828 ASM_VOLATILE_BINARY64(max.d)
829 break;
830 case MINS:
831 ASM_VOLATILE_BINARY64(min.s)
832 break;
833 case MIND:
834 ASM_VOLATILE_BINARY64(min.d)
835 break;
836 case MAXAS:
837 ASM_VOLATILE_BINARY64(maxa.s)
838 break;
839 case MAXAD:
840 ASM_VOLATILE_BINARY64(maxa.d)
841 break;
842 case MINAS:
843 ASM_VOLATILE_BINARY64(mina.s)
844 break;
845 case MINAD:
846 ASM_VOLATILE_BINARY64(mina.d)
847 break;
848 case CMPAFS:
849 ASM_VOLATILE_BINARY64(cmp.af.s)
850 break;
851 case CMPAFD:
852 ASM_VOLATILE_BINARY64(cmp.af.d)
853 break;
854 case CMPSAFS:
855 ASM_VOLATILE_BINARY64(cmp.saf.s)
856 break;
857 case CMPSAFD:
858 ASM_VOLATILE_BINARY64(cmp.saf.d)
859 break;
860 #endif
861 default:
862 vassert(0);
863 break;
865 #endif
866 return ret;
870 extern UInt mips_dirtyhelper_calculate_MSACSR ( void* gs, UInt ws, UInt wt,
871 msa_flt_op inst ) {
872 UInt ret = 0;
873 /* GCC 4.8 and later support MIPS MSA. */
874 #if defined(__mips__) && (defined(__clang__) || (GCC_VERSION >= 408))
875 #if defined(VGA_mips32)
876 VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
877 #else
878 VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
879 #endif
880 V128 *addr = (V128 *)&guest_state->guest_w0;
881 UInt msacsr = guest_state->guest_MSACSR;
883 switch (inst) {
884 case FADDW:
885 ASM_VOLATILE_MSA_BINARY(fadd.w)
886 break;
888 case FADDD:
889 ASM_VOLATILE_MSA_BINARY(fadd.d)
890 break;
892 case FSUBW:
893 ASM_VOLATILE_MSA_BINARY(fsub.w);
894 break;
896 case FSUBD:
897 ASM_VOLATILE_MSA_BINARY(fsub.d);
898 break;
900 case FMULW:
901 ASM_VOLATILE_MSA_BINARY(fmul.w);
902 break;
904 case FMULD:
905 ASM_VOLATILE_MSA_BINARY(fmul.d);
906 break;
908 case FDIVW:
909 ASM_VOLATILE_MSA_BINARY(fdiv.w);
910 break;
912 case FDIVD:
913 ASM_VOLATILE_MSA_BINARY(fdiv.d);
914 break;
916 case FMADDW:
917 ASM_VOLATILE_MSA_BINARY(fmadd.w);
918 break;
920 case FMADDD:
921 ASM_VOLATILE_MSA_BINARY(fmadd.d);
922 break;
924 case FCAFW:
925 ASM_VOLATILE_MSA_BINARY(fcaf.w);
926 break;
928 case FCAFD:
929 ASM_VOLATILE_MSA_BINARY(fcaf.d);
930 break;
932 case FSAFW:
933 ASM_VOLATILE_MSA_BINARY(fsaf.w);
934 break;
936 case FSAFD:
937 ASM_VOLATILE_MSA_BINARY(fsaf.d);
938 break;
940 case FCEQW:
941 ASM_VOLATILE_MSA_BINARY(fceq.w);
942 break;
944 case FCEQD:
945 ASM_VOLATILE_MSA_BINARY(fceq.d);
946 break;
948 case FSEQW:
949 ASM_VOLATILE_MSA_BINARY(fseq.w);
950 break;
952 case FSEQD:
953 ASM_VOLATILE_MSA_BINARY(fseq.d);
954 break;
956 case FCLTW:
957 ASM_VOLATILE_MSA_BINARY(fclt.w);
958 break;
960 case FCLTD:
961 ASM_VOLATILE_MSA_BINARY(fclt.d);
962 break;
964 case FSLTW:
965 ASM_VOLATILE_MSA_BINARY(fslt.w);
966 break;
968 case FSLTD:
969 ASM_VOLATILE_MSA_BINARY(fslt.d);
970 break;
972 case FCLEW:
973 ASM_VOLATILE_MSA_BINARY(fcle.w);
974 break;
976 case FCLED:
977 ASM_VOLATILE_MSA_BINARY(fcle.d);
978 break;
980 case FSLEW:
981 ASM_VOLATILE_MSA_BINARY(fsle.w);
982 break;
984 case FSLED:
985 ASM_VOLATILE_MSA_BINARY(fsle.d);
986 break;
988 case FCNEW:
989 ASM_VOLATILE_MSA_BINARY(fcne.w);
990 break;
992 case FCNED:
993 ASM_VOLATILE_MSA_BINARY(fcne.d);
994 break;
996 case FSNEW:
997 ASM_VOLATILE_MSA_BINARY(fsne.w);
998 break;
1000 case FSNED:
1001 ASM_VOLATILE_MSA_BINARY(fsne.d);
1002 break;
1004 case FEXP2W:
1005 ASM_VOLATILE_MSA_BINARY(fexp2.w);
1006 break;
1008 case FEXP2D:
1009 ASM_VOLATILE_MSA_BINARY(fexp2.d);
1010 break;
1012 case FMINW:
1013 ASM_VOLATILE_MSA_BINARY(fmin.w);
1014 break;
1016 case FMIND:
1017 ASM_VOLATILE_MSA_BINARY(fmin.d);
1018 break;
1020 case FMINAW:
1021 ASM_VOLATILE_MSA_BINARY(fmin_a.w);
1022 break;
1024 case FMINAD:
1025 ASM_VOLATILE_MSA_BINARY(fmin_a.d);
1026 break;
1028 case FCUNW:
1029 ASM_VOLATILE_MSA_BINARY(fcun.w);
1030 break;
1032 case FCUND:
1033 ASM_VOLATILE_MSA_BINARY(fcun.d);
1034 break;
1036 case FSUNW:
1037 ASM_VOLATILE_MSA_BINARY(fsun.w);
1038 break;
1040 case FSUND:
1041 ASM_VOLATILE_MSA_BINARY(fsun.d);
1042 break;
1044 case FCORW:
1045 ASM_VOLATILE_MSA_BINARY(fcor.w);
1046 break;
1048 case FCORD:
1049 ASM_VOLATILE_MSA_BINARY(fcor.d);
1050 break;
1052 case FSORW:
1053 ASM_VOLATILE_MSA_BINARY(fsor.w);
1054 break;
1056 case FSORD:
1057 ASM_VOLATILE_MSA_BINARY(fsor.d);
1058 break;
1060 case FCUEQW:
1061 ASM_VOLATILE_MSA_BINARY(fcueq.w);
1062 break;
1064 case FCUEQD:
1065 ASM_VOLATILE_MSA_BINARY(fcueq.d);
1066 break;
1068 case FSUEQW:
1069 ASM_VOLATILE_MSA_BINARY(fsueq.w);
1070 break;
1072 case FSUEQD:
1073 ASM_VOLATILE_MSA_BINARY(fsueq.d);
1074 break;
1076 case FCUNEW:
1077 ASM_VOLATILE_MSA_BINARY(fcune.w);
1078 break;
1080 case FCUNED:
1081 ASM_VOLATILE_MSA_BINARY(fcune.d);
1082 break;
1084 case FSUNEW:
1085 ASM_VOLATILE_MSA_BINARY(fsune.w);
1086 break;
1088 case FSUNED:
1089 ASM_VOLATILE_MSA_BINARY(fsune.d);
1090 break;
1092 case FCULEW:
1093 ASM_VOLATILE_MSA_BINARY(fcule.w);
1094 break;
1096 case FCULED:
1097 ASM_VOLATILE_MSA_BINARY(fcule.d);
1098 break;
1100 case FSULEW:
1101 ASM_VOLATILE_MSA_BINARY(fsule.w);
1102 break;
1104 case FSULED:
1105 ASM_VOLATILE_MSA_BINARY(fsule.d);
1106 break;
1108 case FCULTW:
1109 ASM_VOLATILE_MSA_BINARY(fcult.w);
1110 break;
1112 case FCULTD:
1113 ASM_VOLATILE_MSA_BINARY(fcult.d);
1114 break;
1116 case FSULTW:
1117 ASM_VOLATILE_MSA_BINARY(fsult.w);
1118 break;
1120 case FSULTD:
1121 ASM_VOLATILE_MSA_BINARY(fsult.d);
1122 break;
1124 case FMAXW:
1125 ASM_VOLATILE_MSA_BINARY(fmax.w);
1126 break;
1128 case FMAXD:
1129 ASM_VOLATILE_MSA_BINARY(fmax.d);
1130 break;
1132 case FMAXAW:
1133 ASM_VOLATILE_MSA_BINARY(fmax_a.w);
1134 break;
1136 case FMAXAD:
1137 ASM_VOLATILE_MSA_BINARY(fmax_a.d);
1138 break;
1140 case FFINTSW:
1141 ASM_VOLATILE_MSA_UNARY(ffint_s.w);
1142 break;
1144 case FFINTSD:
1145 ASM_VOLATILE_MSA_UNARY(ffint_s.d);
1146 break;
1148 case FRCPW:
1149 ASM_VOLATILE_MSA_UNARY(frcp.w);
1150 break;
1152 case FRCPD:
1153 ASM_VOLATILE_MSA_UNARY(frcp.d);
1154 break;
1156 case FRSQRTW:
1157 ASM_VOLATILE_MSA_UNARY(frsqrt.w);
1158 break;
1160 case FRSQRTD:
1161 ASM_VOLATILE_MSA_UNARY(frsqrt.d);
1162 break;
1164 case FSQRTW:
1165 ASM_VOLATILE_MSA_UNARY(fsqrt.w);
1166 break;
1168 case FSQRTD:
1169 ASM_VOLATILE_MSA_UNARY(fsqrt.d);
1170 break;
1172 case FRINTW:
1173 ASM_VOLATILE_MSA_UNARY(frint.w);
1174 break;
1176 case FRINTD:
1177 ASM_VOLATILE_MSA_UNARY(frint.d);
1178 break;
1180 case FTRUNCUW:
1181 ASM_VOLATILE_MSA_UNARY(ftrunc_u.w);
1182 break;
1184 case FTRUNCUD:
1185 ASM_VOLATILE_MSA_UNARY(ftrunc_u.d);
1186 break;
1188 case FTRUNCSW:
1189 ASM_VOLATILE_MSA_UNARY(ftrunc_s.w);
1190 break;
1192 case FTRUNCSD:
1193 ASM_VOLATILE_MSA_UNARY(ftrunc_s.d);
1194 break;
1196 case FEXDOH:
1197 ASM_VOLATILE_MSA_BINARY(fexdo.h);
1198 break;
1200 case FEXDOW:
1201 ASM_VOLATILE_MSA_BINARY(fexdo.w);
1202 break;
1204 case FEXUPRW:
1205 ASM_VOLATILE_MSA_UNARY(fexupr.w);
1206 break;
1208 case FEXUPRD:
1209 ASM_VOLATILE_MSA_UNARY(fexupr.d);
1210 break;
1212 case FEXUPLW:
1213 ASM_VOLATILE_MSA_UNARY(fexupl.w);
1214 break;
1216 case FEXUPLD:
1217 ASM_VOLATILE_MSA_UNARY(fexupl.d);
1218 break;
1220 case FTQH:
1221 ASM_VOLATILE_MSA_BINARY(ftq.h);
1222 break;
1224 case FTQW:
1225 ASM_VOLATILE_MSA_BINARY(ftq.w);
1226 break;
1228 case FFQRD:
1229 ASM_VOLATILE_MSA_UNARY(ffqr.d);
1230 break;
1232 case FFQRW:
1233 ASM_VOLATILE_MSA_UNARY(ffqr.w);
1234 break;
1236 case FFQLD:
1237 ASM_VOLATILE_MSA_UNARY(ffql.d);
1238 break;
1240 case FFQLW:
1241 ASM_VOLATILE_MSA_UNARY(ffql.w);
1242 break;
1244 case FTINT_SD:
1245 ASM_VOLATILE_MSA_UNARY(ftint_s.d);
1246 break;
1248 case FTINT_SW:
1249 ASM_VOLATILE_MSA_UNARY(ftint_s.w);
1250 break;
1252 case FTINT_UD:
1253 ASM_VOLATILE_MSA_UNARY(ftint_u.d);
1254 break;
1256 case FTINT_UW:
1257 ASM_VOLATILE_MSA_UNARY(ftint_u.w);
1258 break;
1260 case FLOG2D:
1261 ASM_VOLATILE_MSA_UNARY(flog2.d);
1262 break;
1264 case FLOG2W:
1265 ASM_VOLATILE_MSA_UNARY(flog2.w);
1266 break;
1268 case FFINT_UD:
1269 ASM_VOLATILE_MSA_UNARY(ffint_u.d);
1270 break;
1272 case FFINT_UW:
1273 ASM_VOLATILE_MSA_UNARY(ffint_u.w);
1274 break;
1277 #endif
1278 return ret;
1281 extern UInt mips_dirtyhelper_get_MSAIR(void) {
1282 UInt ret = 0;
1283 /* GCC 4.8 and later support MIPS MSA. */
1284 #if defined(__mips__) && (defined(__clang__) || (GCC_VERSION >= 408))
1285 __asm__ volatile(".set push \n\t"
1286 ".set mips32r2 \n\t"
1287 ".set hardfloat \n\t"
1288 ".set fp=64 \n\t"
1289 ".set msa \n\t"
1290 ".set noreorder \n\t"
1291 "cfcmsa %0, $0 \n\t"
1292 ".set pop \n\t"
1293 : "=r" (ret) : : );
1294 #endif
1295 return ret;
1301 /*---------------------------------------------------------------*/
1302 /*--- end guest_mips_helpers.c ---*/
1303 /*---------------------------------------------------------------*/