/*---------------------------------------------------------------*/
/*--- begin                              guest_mips_helpers.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2010-2017 RT-RK
      mips-valgrind@rt-rk.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "libvex_basictypes.h"
#include "libvex_emnote.h"
#include "libvex_guest_mips32.h"
#include "libvex_guest_mips64.h"
#include "libvex_ir.h"
#include "libvex.h"

#include "main_util.h"
#include "main_globals.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_mips_defs.h"

#if defined (__GNUC__)
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#else
#define GCC_VERSION 0
#endif

/* This file contains helper functions for mips guest code.  Calls to
   these functions are generated by the back end. */

#define ALWAYSDEFD32(field)                           \
   { offsetof(VexGuestMIPS32State, field),            \
     (sizeof ((VexGuestMIPS32State*)0)->field) }

#define ALWAYSDEFD64(field)                           \
   { offsetof(VexGuestMIPS64State, field),            \
     (sizeof ((VexGuestMIPS64State*)0)->field) }
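
/* Illustrative note: each ALWAYSDEFD entry is just an { offset, size }
   pair describing a slice of the guest state that Memcheck may treat
   as always defined.  For example, ALWAYSDEFD32(guest_r0) expands to
   roughly

      { offsetof(VexGuestMIPS32State, guest_r0),
        sizeof(((VexGuestMIPS32State*)0)->guest_r0) }

   i.e. a 4-byte region starting at guest_r0's offset. */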

IRExpr *guest_mips32_spechelper(const HChar * function_name, IRExpr ** args,
                                IRStmt ** precedingStmts, Int n_precedingStmts)
{
   return NULL;
}

IRExpr *guest_mips64_spechelper ( const HChar * function_name, IRExpr ** args,
                                  IRStmt ** precedingStmts,
                                  Int n_precedingStmts )
{
   return NULL;
}

/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestMIPS32_initialise( /*OUT*/ VexGuestMIPS32State * vex_state)
{
   vex_state->guest_r0 = 0;   /* Hardwired to 0 */
   vex_state->guest_r1 = 0;   /* Assembler temporary */
   vex_state->guest_r2 = 0;   /* Values for function returns ... */
   vex_state->guest_r3 = 0;   /* ... and expression evaluation */
   vex_state->guest_r4 = 0;   /* Function arguments */
   vex_state->guest_r5 = 0;
   vex_state->guest_r6 = 0;
   vex_state->guest_r7 = 0;
   vex_state->guest_r8 = 0;   /* Temporaries */
   vex_state->guest_r9 = 0;
   vex_state->guest_r10 = 0;
   vex_state->guest_r11 = 0;
   vex_state->guest_r12 = 0;
   vex_state->guest_r13 = 0;
   vex_state->guest_r14 = 0;
   vex_state->guest_r15 = 0;
   vex_state->guest_r16 = 0;  /* Saved temporaries */
   vex_state->guest_r17 = 0;
   vex_state->guest_r18 = 0;
   vex_state->guest_r19 = 0;
   vex_state->guest_r20 = 0;
   vex_state->guest_r21 = 0;
   vex_state->guest_r22 = 0;
   vex_state->guest_r23 = 0;
   vex_state->guest_r24 = 0;  /* Temporaries */
   vex_state->guest_r25 = 0;
   vex_state->guest_r26 = 0;  /* Reserved for OS kernel */
   vex_state->guest_r27 = 0;
   vex_state->guest_r28 = 0;  /* Global pointer */
   vex_state->guest_r29 = 0;  /* Stack pointer */
   vex_state->guest_r30 = 0;  /* Frame pointer */
   vex_state->guest_r31 = 0;  /* Return address */
   vex_state->guest_PC = 0;   /* Program counter */
   vex_state->guest_HI = 0;   /* Multiply and divide register higher result */
   vex_state->guest_LO = 0;   /* Multiply and divide register lower result */

   /* FPU Registers */
   vex_state->guest_f0 = 0x7ff800007ff80000ULL;  /* Floating point GP registers */
   vex_state->guest_f1 = 0x7ff800007ff80000ULL;
   vex_state->guest_f2 = 0x7ff800007ff80000ULL;
   vex_state->guest_f3 = 0x7ff800007ff80000ULL;
   vex_state->guest_f4 = 0x7ff800007ff80000ULL;
   vex_state->guest_f5 = 0x7ff800007ff80000ULL;
   vex_state->guest_f6 = 0x7ff800007ff80000ULL;
   vex_state->guest_f7 = 0x7ff800007ff80000ULL;
   vex_state->guest_f8 = 0x7ff800007ff80000ULL;
   vex_state->guest_f9 = 0x7ff800007ff80000ULL;
   vex_state->guest_f10 = 0x7ff800007ff80000ULL;
   vex_state->guest_f11 = 0x7ff800007ff80000ULL;
   vex_state->guest_f12 = 0x7ff800007ff80000ULL;
   vex_state->guest_f13 = 0x7ff800007ff80000ULL;
   vex_state->guest_f14 = 0x7ff800007ff80000ULL;
   vex_state->guest_f15 = 0x7ff800007ff80000ULL;
   vex_state->guest_f16 = 0x7ff800007ff80000ULL;
   vex_state->guest_f17 = 0x7ff800007ff80000ULL;
   vex_state->guest_f18 = 0x7ff800007ff80000ULL;
   vex_state->guest_f19 = 0x7ff800007ff80000ULL;
   vex_state->guest_f20 = 0x7ff800007ff80000ULL;
   vex_state->guest_f21 = 0x7ff800007ff80000ULL;
   vex_state->guest_f22 = 0x7ff800007ff80000ULL;
   vex_state->guest_f23 = 0x7ff800007ff80000ULL;
   vex_state->guest_f24 = 0x7ff800007ff80000ULL;
   vex_state->guest_f25 = 0x7ff800007ff80000ULL;
   vex_state->guest_f26 = 0x7ff800007ff80000ULL;
   vex_state->guest_f27 = 0x7ff800007ff80000ULL;
   vex_state->guest_f28 = 0x7ff800007ff80000ULL;
   vex_state->guest_f29 = 0x7ff800007ff80000ULL;
   vex_state->guest_f30 = 0x7ff800007ff80000ULL;
   vex_state->guest_f31 = 0x7ff800007ff80000ULL;

   vex_state->guest_FIR = 0;   /* FP implementation and revision register */
   vex_state->guest_FCCR = 0;  /* FP condition codes register */
   vex_state->guest_FEXR = 0;  /* FP exceptions register */
   vex_state->guest_FENR = 0;  /* FP enables register */
   vex_state->guest_FCSR = 0;  /* FP control/status register */
   vex_state->guest_ULR = 0;   /* TLS */

   /* Various pseudo-regs mandated by Vex or Valgrind. */
   /* Emulation notes */
   vex_state->guest_EMNOTE = 0;

   /* For clflush: record start and length of area to invalidate */
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN = 0;
   vex_state->host_EvC_COUNTER = 0;
   vex_state->host_EvC_FAILADDR = 0;

   /* Used to record the unredirected guest address at the start of
      a translation whose start has been redirected.  By reading
      this pseudo-register shortly afterwards, the translation can
      find out what the corresponding no-redirection address was.
      Note, this is only set for wrap-style redirects, not for
      replace-style ones. */
   vex_state->guest_NRADDR = 0;

   vex_state->guest_COND = 0;

   vex_state->guest_CP0_status = 0;
   vex_state->guest_CP0_Config5 = 0;

   vex_state->guest_LLaddr = 0xFFFFFFFF;
   vex_state->guest_LLdata = 0;

   /* MIPS32 DSP ASE(r2) specific registers */
   vex_state->guest_DSPControl = 0;  /* DSPControl register */
   vex_state->guest_ac0 = 0;         /* Accumulator 0 */
   vex_state->guest_ac1 = 0;         /* Accumulator 1 */
   vex_state->guest_ac2 = 0;         /* Accumulator 2 */
   vex_state->guest_ac3 = 0;         /* Accumulator 3 */

   vex_state->guest_w0.w64[0] = 0;
   vex_state->guest_w0.w64[1] = 0;
   vex_state->guest_w1.w64[0] = 0;
   vex_state->guest_w1.w64[1] = 0;
   vex_state->guest_w2.w64[0] = 0;
   vex_state->guest_w2.w64[1] = 0;
}
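
/* Illustrative usage sketch (the variable names here are hypothetical,
   not part of the LibVEX API):

      VexGuestMIPS32State gst;
      LibVEX_GuestMIPS32_initialise(&gst);
      gst.guest_PC  = entry_point;   // where guest execution starts
      gst.guest_r29 = initial_sp;    // guest stack pointer ($sp)

   Everything else starts at zero, except the FP registers, which are
   filled with the quiet-NaN pattern set above. */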

void LibVEX_GuestMIPS64_initialise ( /*OUT*/ VexGuestMIPS64State * vex_state )
{
   vex_state->guest_r0 = 0;   /* Hardwired to 0 */
   vex_state->guest_r1 = 0;   /* Assembler temporary */
   vex_state->guest_r2 = 0;   /* Values for function returns ... */
   vex_state->guest_r3 = 0;
   vex_state->guest_r4 = 0;   /* Function arguments */
   vex_state->guest_r5 = 0;
   vex_state->guest_r6 = 0;
   vex_state->guest_r7 = 0;
   vex_state->guest_r8 = 0;
   vex_state->guest_r9 = 0;
   vex_state->guest_r10 = 0;
   vex_state->guest_r11 = 0;
   vex_state->guest_r12 = 0;  /* Temporaries */
   vex_state->guest_r13 = 0;
   vex_state->guest_r14 = 0;
   vex_state->guest_r15 = 0;
   vex_state->guest_r16 = 0;  /* Saved temporaries */
   vex_state->guest_r17 = 0;
   vex_state->guest_r18 = 0;
   vex_state->guest_r19 = 0;
   vex_state->guest_r20 = 0;
   vex_state->guest_r21 = 0;
   vex_state->guest_r22 = 0;
   vex_state->guest_r23 = 0;
   vex_state->guest_r24 = 0;  /* Temporaries */
   vex_state->guest_r25 = 0;
   vex_state->guest_r26 = 0;  /* Reserved for OS kernel */
   vex_state->guest_r27 = 0;
   vex_state->guest_r28 = 0;  /* Global pointer */
   vex_state->guest_r29 = 0;  /* Stack pointer */
   vex_state->guest_r30 = 0;  /* Frame pointer */
   vex_state->guest_r31 = 0;  /* Return address */
   vex_state->guest_PC = 0;   /* Program counter */
   vex_state->guest_HI = 0;   /* Multiply and divide register higher result */
   vex_state->guest_LO = 0;   /* Multiply and divide register lower result */

   /* FPU Registers */
   vex_state->guest_f0 = 0x7ff800007ff80000ULL;  /* Floating point registers */
   vex_state->guest_f1 = 0x7ff800007ff80000ULL;
   vex_state->guest_f2 = 0x7ff800007ff80000ULL;
   vex_state->guest_f3 = 0x7ff800007ff80000ULL;
   vex_state->guest_f4 = 0x7ff800007ff80000ULL;
   vex_state->guest_f5 = 0x7ff800007ff80000ULL;
   vex_state->guest_f6 = 0x7ff800007ff80000ULL;
   vex_state->guest_f7 = 0x7ff800007ff80000ULL;
   vex_state->guest_f8 = 0x7ff800007ff80000ULL;
   vex_state->guest_f9 = 0x7ff800007ff80000ULL;
   vex_state->guest_f10 = 0x7ff800007ff80000ULL;
   vex_state->guest_f11 = 0x7ff800007ff80000ULL;
   vex_state->guest_f12 = 0x7ff800007ff80000ULL;
   vex_state->guest_f13 = 0x7ff800007ff80000ULL;
   vex_state->guest_f14 = 0x7ff800007ff80000ULL;
   vex_state->guest_f15 = 0x7ff800007ff80000ULL;
   vex_state->guest_f16 = 0x7ff800007ff80000ULL;
   vex_state->guest_f17 = 0x7ff800007ff80000ULL;
   vex_state->guest_f18 = 0x7ff800007ff80000ULL;
   vex_state->guest_f19 = 0x7ff800007ff80000ULL;
   vex_state->guest_f20 = 0x7ff800007ff80000ULL;
   vex_state->guest_f21 = 0x7ff800007ff80000ULL;
   vex_state->guest_f22 = 0x7ff800007ff80000ULL;
   vex_state->guest_f23 = 0x7ff800007ff80000ULL;
   vex_state->guest_f24 = 0x7ff800007ff80000ULL;
   vex_state->guest_f25 = 0x7ff800007ff80000ULL;
   vex_state->guest_f26 = 0x7ff800007ff80000ULL;
   vex_state->guest_f27 = 0x7ff800007ff80000ULL;
   vex_state->guest_f28 = 0x7ff800007ff80000ULL;
   vex_state->guest_f29 = 0x7ff800007ff80000ULL;
   vex_state->guest_f30 = 0x7ff800007ff80000ULL;
   vex_state->guest_f31 = 0x7ff800007ff80000ULL;

   vex_state->guest_FIR = 0;   /* FP implementation and revision register */
   vex_state->guest_FCCR = 0;  /* FP condition codes register */
   vex_state->guest_FEXR = 0;  /* FP exceptions register */
   vex_state->guest_FENR = 0;  /* FP enables register */
   vex_state->guest_FCSR = 0;  /* FP control/status register */

   vex_state->guest_ULR = 0;

   /* Various pseudo-regs mandated by Vex or Valgrind. */
   /* Emulation notes */
   vex_state->guest_EMNOTE = 0;

   /* For clflush: record start and length of area to invalidate */
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN = 0;
   vex_state->host_EvC_COUNTER = 0;
   vex_state->host_EvC_FAILADDR = 0;

   /* Used to record the unredirected guest address at the start of
      a translation whose start has been redirected.  By reading
      this pseudo-register shortly afterwards, the translation can
      find out what the corresponding no-redirection address was.
      Note, this is only set for wrap-style redirects, not for
      replace-style ones. */
   vex_state->guest_NRADDR = 0;

   vex_state->guest_COND = 0;

   vex_state->guest_CP0_status = MIPS_CP0_STATUS_FR;

   vex_state->guest_LLaddr = 0xFFFFFFFFFFFFFFFFULL;
   vex_state->guest_LLdata = 0;

   vex_state->guest_MSACSR = 0;
}

/*-----------------------------------------------------------*/
/*--- Describing the mips guest state, for the benefit    ---*/
/*--- of iropt and instrumenters.                         ---*/
/*-----------------------------------------------------------*/

/* Figure out if any part of the guest state contained in minoff
   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).

   We enforce precise exns for guest SP, PC.

   Only SP is needed in mode VexRegUpdSpAtMemAccess.
*/
Bool guest_mips32_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   Int sp_min = offsetof(VexGuestMIPS32State, guest_r29);
   Int sp_max = sp_min + 4 - 1;
   Int pc_min = offsetof(VexGuestMIPS32State, guest_PC);
   Int pc_max = pc_min + 4 - 1;

   if (maxoff < sp_min || minoff > sp_max) {
      /* no overlap with sp */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False;  /* We only need to check stack pointer. */
   } else {
      return True;
   }

   if (maxoff < pc_min || minoff > pc_max) {
      /* no overlap with pc */
   } else {
      return True;
   }

   /* We appear to need precise updates of the frame pointer (r30) in
      order to get proper stacktraces from non-optimised code. */
   Int fp_min = offsetof(VexGuestMIPS32State, guest_r30);
   Int fp_max = fp_min + 4 - 1;

   if (maxoff < fp_min || minoff > fp_max) {
      /* no overlap with fp */
   } else {
      return True;
   }

   return False;
}
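
/* Minimal sketch of how a caller might exercise the check above
   (illustrative only):

      Int sp_off = offsetof(VexGuestMIPS32State, guest_r29);
      Bool prec  = guest_mips32_state_requires_precise_mem_exns(
                      sp_off, sp_off + 3, VexRegUpdSpAtMemAccess);
      // prec == True: the range overlaps SP, so precise state is needed.

   A range that misses SP, PC and FP entirely returns False, letting
   iropt delay flushing that part of the state back to memory. */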

Bool guest_mips64_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   Int sp_min = offsetof(VexGuestMIPS64State, guest_r29);
   Int sp_max = sp_min + 8 - 1;
   Int pc_min = offsetof(VexGuestMIPS64State, guest_PC);
   Int pc_max = pc_min + 8 - 1;

   if ( maxoff < sp_min || minoff > sp_max ) {
      /* no overlap with sp */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False;  /* We only need to check stack pointer. */
   } else {
      return True;
   }

   if ( maxoff < pc_min || minoff > pc_max ) {
      /* no overlap with pc */
   } else {
      return True;
   }

   Int fp_min = offsetof(VexGuestMIPS64State, guest_r30);
   Int fp_max = fp_min + 8 - 1;

   if ( maxoff < fp_min || minoff > fp_max ) {
      /* no overlap with fp */
   } else {
      return True;
   }

   return False;
}

VexGuestLayout mips32Guest_layout = {
   /* Total size of the guest state, in bytes. */
   .total_sizeB = sizeof(VexGuestMIPS32State),
   /* Describe the stack pointer. */
   .offset_SP = offsetof(VexGuestMIPS32State, guest_r29),
   .sizeof_SP = 4,
   /* Describe the frame pointer. */
   .offset_FP = offsetof(VexGuestMIPS32State, guest_r30),
   .sizeof_FP = 4,
   /* Describe the instruction pointer. */
   .offset_IP = offsetof(VexGuestMIPS32State, guest_PC),
   .sizeof_IP = 4,
   /* Describe any sections to be regarded by Memcheck as
      'always-defined'. */
   .n_alwaysDefd = 8,
   /* ? :( */
   .alwaysDefd = {
      /* 0 */ ALWAYSDEFD32(guest_r0),
      /* 1 */ ALWAYSDEFD32(guest_r1),
      /* 2 */ ALWAYSDEFD32(guest_EMNOTE),
      /* 3 */ ALWAYSDEFD32(guest_CMSTART),
      /* 4 */ ALWAYSDEFD32(guest_CMLEN),
      /* 5 */ ALWAYSDEFD32(guest_r29),
      /* 6 */ ALWAYSDEFD32(guest_r31),
      /* 7 */ ALWAYSDEFD32(guest_ULR)
   }
};

VexGuestLayout mips64Guest_layout = {
   /* Total size of the guest state, in bytes. */
   .total_sizeB = sizeof(VexGuestMIPS64State),
   /* Describe the stack pointer. */
   .offset_SP = offsetof(VexGuestMIPS64State, guest_r29),
   .sizeof_SP = 8,
   /* Describe the frame pointer. */
   .offset_FP = offsetof(VexGuestMIPS64State, guest_r30),
   .sizeof_FP = 8,
   /* Describe the instruction pointer. */
   .offset_IP = offsetof(VexGuestMIPS64State, guest_PC),
   .sizeof_IP = 8,
   /* Describe any sections to be regarded by Memcheck as
      'always-defined'. */
   .n_alwaysDefd = 7,
   /* ? :( */
   .alwaysDefd = {
      /* 0 */ ALWAYSDEFD64 (guest_r0),
      /* 1 */ ALWAYSDEFD64 (guest_EMNOTE),
      /* 2 */ ALWAYSDEFD64 (guest_CMSTART),
      /* 3 */ ALWAYSDEFD64 (guest_CMLEN),
      /* 4 */ ALWAYSDEFD64 (guest_r29),
      /* 5 */ ALWAYSDEFD64 (guest_r31),
      /* 6 */ ALWAYSDEFD64 (guest_ULR)
   }
};

#define ASM_VOLATILE_RDHWR(opcode)                                   \
   __asm__ __volatile__(".word 0x7C02003B | "#opcode" << 11 \n\t"    \
                        : "+r" (x) : :                               \
                       )

HWord mips_dirtyhelper_rdhwr ( UInt rd )
{
#if defined(__mips__)
   register HWord x __asm__("v0") = 0;

   switch (rd) {
      case 0:  /* x = CPUNum() */
         ASM_VOLATILE_RDHWR(0);  /* rdhwr v0, $0 */
         break;

      case 1:  /* x = SYNCI_Step() */
         ASM_VOLATILE_RDHWR(1);  /* rdhwr v0, $1 */
         break;

      case 2:  /* x = CC() */
         ASM_VOLATILE_RDHWR(2);  /* rdhwr v0, $2 */
         break;

      case 3:  /* x = CCRes() */
         ASM_VOLATILE_RDHWR(3);  /* rdhwr v0, $3 */
         break;

      case 31: /* x = CVMX_get_cycles() */
         ASM_VOLATILE_RDHWR(31); /* rdhwr v0, $31 */
         break;

      default:
         vassert(0);
         break;
   }
   return x;
#else
   return 0;
#endif
}
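
/* Encoding sketch: 0x7C02003B is SPECIAL3 with funct RDHWR, rt = $2 (v0)
   and rd = 0, i.e. "rdhwr v0, $0".  OR-ing in (opcode << 11) fills the
   rd field, so for example ASM_VOLATILE_RDHWR(1) emits the word
   0x7C02083B, which is "rdhwr v0, $1" (the SYNCI step size).  The
   helper simply executes the real instruction on the host and returns
   the value left in v0. */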

#define ASM_VOLATILE_UNARY32(inst)                        \
   __asm__ volatile(".set push"        "\n\t"             \
                    ".set hardfloat"   "\n\t"             \
                    "cfc1 $8, $31"     "\n\t"             \
                    "ctc1 %2, $31"     "\n\t"             \
                    "mtc1 %1, $f20"    "\n\t"             \
                    #inst" $f20, $f20" "\n\t"             \
                    "cfc1 %0, $31"     "\n\t"             \
                    "ctc1 $8, $31"     "\n\t"             \
                    ".set pop"         "\n\t"             \
                    : "=r" (ret)                          \
                    : "r" (loFsVal), "r" (fcsr)           \
                    : "$8", "$f20"                        \
                   );

#define ASM_VOLATILE_UNARY32_DOUBLE(inst)                 \
   __asm__ volatile(".set push"        "\n\t"             \
                    ".set hardfloat"   "\n\t"             \
                    "cfc1 $8, $31"     "\n\t"             \
                    "ctc1 %2, $31"     "\n\t"             \
                    "ldc1 $f20, 0(%1)" "\n\t"             \
                    #inst" $f20, $f20" "\n\t"             \
                    "cfc1 %0, $31"     "\n\t"             \
                    "ctc1 $8, $31"     "\n\t"             \
                    ".set pop"         "\n\t"             \
                    : "=r" (ret)                          \
                    : "r" (&fsVal), "r" (fcsr)            \
                    : "$8", "$f20", "$f21"                \
                   );

#define ASM_VOLATILE_UNARY64(inst)                        \
   __asm__ volatile(".set push"        "\n\t"             \
                    ".set hardfloat"   "\n\t"             \
                    ".set fp=64"       "\n\t"             \
                    "cfc1 $8, $31"     "\n\t"             \
                    "ctc1 %2, $31"     "\n\t"             \
                    "ldc1 $f24, 0(%1)" "\n\t"             \
                    #inst" $f24, $f24" "\n\t"             \
                    "cfc1 %0, $31"     "\n\t"             \
                    "ctc1 $8, $31"     "\n\t"             \
                    ".set pop"         "\n\t"             \
                    : "=r" (ret)                          \
                    : "r" (&(addr[fs])), "r" (fcsr)       \
                    : "$8", "$f24"                        \
                   );

#define ASM_VOLATILE_MSA_UNARY(inst)                      \
   __asm__ volatile(".set push"        "\n\t"             \
                    ".set mips32r2"    "\n\t"             \
                    ".set hardfloat"   "\n\t"             \
                    ".set fp=64"       "\n\t"             \
                    ".set msa"         "\n\t"             \
                    ".set noreorder"   "\n\t"             \
                    "cfcmsa $t0, $1"   "\n\t"             \
                    "ctcmsa $1, %2"    "\n\t"             \
                    "ld.b $w24, 0(%1)" "\n\t"             \
                    #inst" $w24, $w24" "\n\t"             \
                    "cfcmsa %0, $1"    "\n\t"             \
                    "ctcmsa $1, $t0"   "\n\t"             \
                    ".set pop"         "\n\t"             \
                    : "=r" (ret)                          \
                    : "r" (&(addr[ws])), "r" (msacsr)     \
                    : "t0"                                \
                   );

#define ASM_VOLATILE_BINARY32(inst)                           \
   __asm__ volatile(".set push"              "\n\t"           \
                    ".set hardfloat"         "\n\t"           \
                    "cfc1 $8, $31"           "\n\t"           \
                    "ctc1 %3, $31"           "\n\t"           \
                    "mtc1 %1, $f20"          "\n\t"           \
                    "mtc1 %2, $f22"          "\n\t"           \
                    #inst" $f20, $f20, $f22" "\n\t"           \
                    "cfc1 %0, $31"           "\n\t"           \
                    "ctc1 $8, $31"           "\n\t"           \
                    ".set pop"               "\n\t"           \
                    : "=r" (ret)                              \
                    : "r" (loFsVal), "r" (loFtVal), "r" (fcsr) \
                    : "$8", "$f20", "$f22"                    \
                   );

#define ASM_VOLATILE_BINARY32_DOUBLE(inst)                    \
   __asm__ volatile(".set push"              "\n\t"           \
                    ".set hardfloat"         "\n\t"           \
                    "cfc1 $8, $31"           "\n\t"           \
                    "ctc1 %3, $31"           "\n\t"           \
                    "ldc1 $f20, 0(%1)"       "\n\t"           \
                    "ldc1 $f22, 0(%2)"       "\n\t"           \
                    #inst" $f20, $f20, $f22" "\n\t"           \
                    "cfc1 %0, $31"           "\n\t"           \
                    "ctc1 $8, $31"           "\n\t"           \
                    ".set pop"               "\n\t"           \
                    : "=r" (ret)                              \
                    : "r" (&fsVal), "r" (&ftVal), "r" (fcsr)  \
                    : "$8", "$f20", "$f21", "$f22", "$f23"    \
                   );

#define ASM_VOLATILE_BINARY64(inst)                                \
   __asm__ volatile(".set push"              "\n\t"                \
                    ".set hardfloat"         "\n\t"                \
                    "cfc1 $8, $31"           "\n\t"                \
                    "ctc1 %3, $31"           "\n\t"                \
                    "ldc1 $f24, 0(%1)"       "\n\t"                \
                    "ldc1 $f26, 0(%2)"       "\n\t"                \
                    #inst" $f24, $f24, $f26" "\n\t"                \
                    "cfc1 %0, $31"           "\n\t"                \
                    "ctc1 $8, $31"           "\n\t"                \
                    ".set pop"               "\n\t"                \
                    : "=r" (ret)                                   \
                    : "r" (&(addr[fs])), "r" (&(addr[ft])), "r" (fcsr) \
                    : "$8", "$f24", "$f26"                         \
                   );

#define ASM_VOLATILE_MSA_BINARY(inst)                              \
   __asm__ volatile(".set push"              "\n\t"                \
                    ".set mips32r2"          "\n\t"                \
                    ".set hardfloat"         "\n\t"                \
                    ".set fp=64"             "\n\t"                \
                    ".set msa"               "\n\t"                \
                    "cfcmsa $t0, $1"         "\n\t"                \
                    "ctcmsa $1, %3"          "\n\t"                \
                    "ld.b $w24, 0(%1)"       "\n\t"                \
                    "ld.b $w26, 0(%2)"       "\n\t"                \
                    #inst" $w24, $w24, $w26" "\n\t"                \
                    "cfcmsa %0, $1"          "\n\t"                \
                    "ctcmsa $1, $t0"         "\n\t"                \
                    ".set pop"               "\n\t"                \
                    : "=r" (ret)                                   \
                    : "r" (&(addr[ws])), "r" (&(addr[wt])), "r" (msacsr) \
                    : "t0"                                         \
                   );

/* TODO: Add cases for all FPU instructions, because every FPU instruction
   can change the value of the FCSR register. */
extern UInt mips_dirtyhelper_calculate_FCSR_fp32 ( void* gs, UInt fs, UInt ft,
                                                   flt_op inst )
{
   UInt ret = 0;
#if defined(__mips__)
   VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
   UInt loFsVal, hiFsVal, loFtVal, hiFtVal;
#if defined (_MIPSEL)
   ULong *addr = (ULong *)&guest_state->guest_f0;
   loFsVal = (UInt)addr[fs];
   hiFsVal = (UInt)addr[fs+1];
   loFtVal = (UInt)addr[ft];
   hiFtVal = (UInt)addr[ft+1];
#elif defined (_MIPSEB)
   UInt *addr = (UInt *)&guest_state->guest_f0;
   loFsVal = (UInt)addr[fs*2];
   hiFsVal = (UInt)addr[fs*2+2];
   loFtVal = (UInt)addr[ft*2];
   hiFtVal = (UInt)addr[ft*2+2];
#endif
   ULong fsVal = ((ULong) hiFsVal) << 32 | loFsVal;
   ULong ftVal = ((ULong) hiFtVal) << 32 | loFtVal;
   UInt fcsr = guest_state->guest_FCSR;
   switch (inst) {
      case ROUNDWD:
         ASM_VOLATILE_UNARY32_DOUBLE(round.w.d)
         break;
      case FLOORWS:
         ASM_VOLATILE_UNARY32(floor.w.s)
         break;
      case FLOORWD:
         ASM_VOLATILE_UNARY32_DOUBLE(floor.w.d)
         break;
      case TRUNCWS:
         ASM_VOLATILE_UNARY32(trunc.w.s)
         break;
      case TRUNCWD:
         ASM_VOLATILE_UNARY32_DOUBLE(trunc.w.d)
         break;
      case CEILWS:
         ASM_VOLATILE_UNARY32(ceil.w.s)
         break;
      case CEILWD:
         ASM_VOLATILE_UNARY32_DOUBLE(ceil.w.d)
         break;
      case CVTDS:
         ASM_VOLATILE_UNARY32(cvt.d.s)
         break;
      case CVTDW:
         ASM_VOLATILE_UNARY32(cvt.d.w)
         break;
      case CVTSW:
         ASM_VOLATILE_UNARY32(cvt.s.w)
         break;
      case CVTSD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.s.d)
         break;
      case CVTWS:
         ASM_VOLATILE_UNARY32(cvt.w.s)
         break;
      case CVTWD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.w.d)
         break;
      case ROUNDWS:
         ASM_VOLATILE_UNARY32(round.w.s)
         break;
      case ADDS:
         ASM_VOLATILE_BINARY32(add.s)
         break;
      case ADDD:
         ASM_VOLATILE_BINARY32_DOUBLE(add.d)
         break;
      case SUBS:
         ASM_VOLATILE_BINARY32(sub.s)
         break;
      case SUBD:
         ASM_VOLATILE_BINARY32_DOUBLE(sub.d)
         break;
      case DIVS:
         ASM_VOLATILE_BINARY32(div.s)
         break;
      default:
         vassert(0);
         break;
   }
#endif
   return ret;
}
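
/* Illustrative call (register numbers are made up): to learn what FCSR
   an "add.s" on guest registers f4 and f6 would produce, generated code
   can invoke

      UInt new_fcsr = mips_dirtyhelper_calculate_FCSR_fp32(
                         &guest_state, 4, 6, ADDS);

   The helper loads the operands from the guest state, installs the
   guest FCSR on the host FPU, runs the real add.s, returns the
   resulting FCSR and restores the host's original FCSR. */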

/* TODO: Add cases for all FPU instructions, because every FPU instruction
   can change the value of the FCSR register. */
extern UInt mips_dirtyhelper_calculate_FCSR_fp64 ( void* gs, UInt fs, UInt ft,
                                                   flt_op inst )
{
   UInt ret = 0;
#if defined(__mips__) && ((__mips == 64) ||                            \
                          (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)))
#if defined(VGA_mips32)
   VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
#else
   VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
#endif
   ULong *addr = (ULong *)&guest_state->guest_f0;
   UInt fcsr = guest_state->guest_FCSR;
   switch (inst) {
      case ROUNDWD:
         ASM_VOLATILE_UNARY64(round.w.d)
         break;
      case FLOORWS:
         ASM_VOLATILE_UNARY64(floor.w.s)
         break;
      case FLOORWD:
         ASM_VOLATILE_UNARY64(floor.w.d)
         break;
      case TRUNCWS:
         ASM_VOLATILE_UNARY64(trunc.w.s)
         break;
      case TRUNCWD:
         ASM_VOLATILE_UNARY64(trunc.w.d)
         break;
      case CEILWS:
         ASM_VOLATILE_UNARY64(ceil.w.s)
         break;
      case CEILWD:
         ASM_VOLATILE_UNARY64(ceil.w.d)
         break;
      case CVTDS:
         ASM_VOLATILE_UNARY64(cvt.d.s)
         break;
      case CVTDW:
         ASM_VOLATILE_UNARY64(cvt.d.w)
         break;
      case CVTSW:
         ASM_VOLATILE_UNARY64(cvt.s.w)
         break;
      case CVTSD:
         ASM_VOLATILE_UNARY64(cvt.s.d)
         break;
      case CVTWS:
         ASM_VOLATILE_UNARY64(cvt.w.s)
         break;
      case CVTWD:
         ASM_VOLATILE_UNARY64(cvt.w.d)
         break;
      case ROUNDWS:
         ASM_VOLATILE_UNARY64(round.w.s)
         break;
      case CEILLS:
         ASM_VOLATILE_UNARY64(ceil.l.s)
         break;
      case CEILLD:
         ASM_VOLATILE_UNARY64(ceil.l.d)
         break;
      case CVTDL:
         ASM_VOLATILE_UNARY64(cvt.d.l)
         break;
      case CVTLS:
         ASM_VOLATILE_UNARY64(cvt.l.s)
         break;
      case CVTLD:
         ASM_VOLATILE_UNARY64(cvt.l.d)
         break;
      case CVTSL:
         ASM_VOLATILE_UNARY64(cvt.s.l)
         break;
      case FLOORLS:
         ASM_VOLATILE_UNARY64(floor.l.s)
         break;
      case FLOORLD:
         ASM_VOLATILE_UNARY64(floor.l.d)
         break;
      case ROUNDLS:
         ASM_VOLATILE_UNARY64(round.l.s)
         break;
      case ROUNDLD:
         ASM_VOLATILE_UNARY64(round.l.d)
         break;
      case TRUNCLS:
         ASM_VOLATILE_UNARY64(trunc.l.s)
         break;
      case TRUNCLD:
         ASM_VOLATILE_UNARY64(trunc.l.d)
         break;
      case ADDS:
         ASM_VOLATILE_BINARY64(add.s)
         break;
      case ADDD:
         ASM_VOLATILE_BINARY64(add.d)
         break;
      case SUBS:
         ASM_VOLATILE_BINARY64(sub.s)
         break;
      case SUBD:
         ASM_VOLATILE_BINARY64(sub.d)
         break;
      case DIVS:
         ASM_VOLATILE_BINARY64(div.s)
         break;
#if defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
      case RINTS:
         ASM_VOLATILE_UNARY64(rint.s)
         break;
      case RINTD:
         ASM_VOLATILE_UNARY64(rint.d)
         break;
      case MAXS:
         ASM_VOLATILE_BINARY64(max.s)
         break;
      case MAXD:
         ASM_VOLATILE_BINARY64(max.d)
         break;
      case MINS:
         ASM_VOLATILE_BINARY64(min.s)
         break;
      case MIND:
         ASM_VOLATILE_BINARY64(min.d)
         break;
      case MAXAS:
         ASM_VOLATILE_BINARY64(maxa.s)
         break;
      case MAXAD:
         ASM_VOLATILE_BINARY64(maxa.d)
         break;
      case MINAS:
         ASM_VOLATILE_BINARY64(mina.s)
         break;
      case MINAD:
         ASM_VOLATILE_BINARY64(mina.d)
         break;
      case CMPAFS:
         ASM_VOLATILE_BINARY64(cmp.af.s)
         break;
      case CMPAFD:
         ASM_VOLATILE_BINARY64(cmp.af.d)
         break;
      case CMPSAFS:
         ASM_VOLATILE_BINARY64(cmp.saf.s)
         break;
      case CMPSAFD:
         ASM_VOLATILE_BINARY64(cmp.saf.d)
         break;
#endif
      default:
         vassert(0);
         break;
   }
#endif
   return ret;
}

extern UInt mips_dirtyhelper_calculate_MSACSR ( void* gs, UInt ws, UInt wt,
                                                msa_flt_op inst ) {
   UInt ret = 0;
/* GCC 4.8 and later support MIPS MSA. */
#if defined(__mips__) && (defined(__clang__) || (GCC_VERSION >= 408))
#if defined(VGA_mips32)
   VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
#else
   VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
#endif
   V128 *addr = (V128 *)&guest_state->guest_w0;
   UInt msacsr = guest_state->guest_MSACSR;

   switch (inst) {
      case FADDW:
         ASM_VOLATILE_MSA_BINARY(fadd.w)
         break;

      case FADDD:
         ASM_VOLATILE_MSA_BINARY(fadd.d)
         break;

      case FSUBW:
         ASM_VOLATILE_MSA_BINARY(fsub.w);
         break;

      case FSUBD:
         ASM_VOLATILE_MSA_BINARY(fsub.d);
         break;

      case FMULW:
         ASM_VOLATILE_MSA_BINARY(fmul.w);
         break;

      case FMULD:
         ASM_VOLATILE_MSA_BINARY(fmul.d);
         break;

      case FDIVW:
         ASM_VOLATILE_MSA_BINARY(fdiv.w);
         break;

      case FDIVD:
         ASM_VOLATILE_MSA_BINARY(fdiv.d);
         break;

      case FMADDW:
         ASM_VOLATILE_MSA_BINARY(fmadd.w);
         break;

      case FMADDD:
         ASM_VOLATILE_MSA_BINARY(fmadd.d);
         break;

      case FCAFW:
         ASM_VOLATILE_MSA_BINARY(fcaf.w);
         break;

      case FCAFD:
         ASM_VOLATILE_MSA_BINARY(fcaf.d);
         break;

      case FSAFW:
         ASM_VOLATILE_MSA_BINARY(fsaf.w);
         break;

      case FSAFD:
         ASM_VOLATILE_MSA_BINARY(fsaf.d);
         break;

      case FCEQW:
         ASM_VOLATILE_MSA_BINARY(fceq.w);
         break;

      case FCEQD:
         ASM_VOLATILE_MSA_BINARY(fceq.d);
         break;

      case FSEQW:
         ASM_VOLATILE_MSA_BINARY(fseq.w);
         break;

      case FSEQD:
         ASM_VOLATILE_MSA_BINARY(fseq.d);
         break;

      case FCLTW:
         ASM_VOLATILE_MSA_BINARY(fclt.w);
         break;

      case FCLTD:
         ASM_VOLATILE_MSA_BINARY(fclt.d);
         break;

      case FSLTW:
         ASM_VOLATILE_MSA_BINARY(fslt.w);
         break;

      case FSLTD:
         ASM_VOLATILE_MSA_BINARY(fslt.d);
         break;

      case FCLEW:
         ASM_VOLATILE_MSA_BINARY(fcle.w);
         break;

      case FCLED:
         ASM_VOLATILE_MSA_BINARY(fcle.d);
         break;

      case FSLEW:
         ASM_VOLATILE_MSA_BINARY(fsle.w);
         break;

      case FSLED:
         ASM_VOLATILE_MSA_BINARY(fsle.d);
         break;

      case FCNEW:
         ASM_VOLATILE_MSA_BINARY(fcne.w);
         break;

      case FCNED:
         ASM_VOLATILE_MSA_BINARY(fcne.d);
         break;

      case FSNEW:
         ASM_VOLATILE_MSA_BINARY(fsne.w);
         break;

      case FSNED:
         ASM_VOLATILE_MSA_BINARY(fsne.d);
         break;

      case FEXP2W:
         ASM_VOLATILE_MSA_BINARY(fexp2.w);
         break;

      case FEXP2D:
         ASM_VOLATILE_MSA_BINARY(fexp2.d);
         break;

      case FMINW:
         ASM_VOLATILE_MSA_BINARY(fmin.w);
         break;

      case FMIND:
         ASM_VOLATILE_MSA_BINARY(fmin.d);
         break;

      case FMINAW:
         ASM_VOLATILE_MSA_BINARY(fmin_a.w);
         break;

      case FMINAD:
         ASM_VOLATILE_MSA_BINARY(fmin_a.d);
         break;

      case FCUNW:
         ASM_VOLATILE_MSA_BINARY(fcun.w);
         break;

      case FCUND:
         ASM_VOLATILE_MSA_BINARY(fcun.d);
         break;

      case FSUNW:
         ASM_VOLATILE_MSA_BINARY(fsun.w);
         break;

      case FSUND:
         ASM_VOLATILE_MSA_BINARY(fsun.d);
         break;

      case FCORW:
         ASM_VOLATILE_MSA_BINARY(fcor.w);
         break;

      case FCORD:
         ASM_VOLATILE_MSA_BINARY(fcor.d);
         break;

      case FSORW:
         ASM_VOLATILE_MSA_BINARY(fsor.w);
         break;

      case FSORD:
         ASM_VOLATILE_MSA_BINARY(fsor.d);
         break;

      case FCUEQW:
         ASM_VOLATILE_MSA_BINARY(fcueq.w);
         break;

      case FCUEQD:
         ASM_VOLATILE_MSA_BINARY(fcueq.d);
         break;

      case FSUEQW:
         ASM_VOLATILE_MSA_BINARY(fsueq.w);
         break;

      case FSUEQD:
         ASM_VOLATILE_MSA_BINARY(fsueq.d);
         break;

      case FCUNEW:
         ASM_VOLATILE_MSA_BINARY(fcune.w);
         break;

      case FCUNED:
         ASM_VOLATILE_MSA_BINARY(fcune.d);
         break;

      case FSUNEW:
         ASM_VOLATILE_MSA_BINARY(fsune.w);
         break;

      case FSUNED:
         ASM_VOLATILE_MSA_BINARY(fsune.d);
         break;

      case FCULEW:
         ASM_VOLATILE_MSA_BINARY(fcule.w);
         break;

      case FCULED:
         ASM_VOLATILE_MSA_BINARY(fcule.d);
         break;

      case FSULEW:
         ASM_VOLATILE_MSA_BINARY(fsule.w);
         break;

      case FSULED:
         ASM_VOLATILE_MSA_BINARY(fsule.d);
         break;

      case FCULTW:
         ASM_VOLATILE_MSA_BINARY(fcult.w);
         break;

      case FCULTD:
         ASM_VOLATILE_MSA_BINARY(fcult.d);
         break;

      case FSULTW:
         ASM_VOLATILE_MSA_BINARY(fsult.w);
         break;

      case FSULTD:
         ASM_VOLATILE_MSA_BINARY(fsult.d);
         break;

      case FMAXW:
         ASM_VOLATILE_MSA_BINARY(fmax.w);
         break;

      case FMAXD:
         ASM_VOLATILE_MSA_BINARY(fmax.d);
         break;

      case FMAXAW:
         ASM_VOLATILE_MSA_BINARY(fmax_a.w);
         break;

      case FMAXAD:
         ASM_VOLATILE_MSA_BINARY(fmax_a.d);
         break;

      case FFINTSW:
         ASM_VOLATILE_MSA_UNARY(ffint_s.w);
         break;

      case FFINTSD:
         ASM_VOLATILE_MSA_UNARY(ffint_s.d);
         break;

      case FRCPW:
         ASM_VOLATILE_MSA_UNARY(frcp.w);
         break;

      case FRCPD:
         ASM_VOLATILE_MSA_UNARY(frcp.d);
         break;

      case FRSQRTW:
         ASM_VOLATILE_MSA_UNARY(frsqrt.w);
         break;

      case FRSQRTD:
         ASM_VOLATILE_MSA_UNARY(frsqrt.d);
         break;

      case FSQRTW:
         ASM_VOLATILE_MSA_UNARY(fsqrt.w);
         break;

      case FSQRTD:
         ASM_VOLATILE_MSA_UNARY(fsqrt.d);
         break;

      case FRINTW:
         ASM_VOLATILE_MSA_UNARY(frint.w);
         break;

      case FRINTD:
         ASM_VOLATILE_MSA_UNARY(frint.d);
         break;

      case FTRUNCUW:
         ASM_VOLATILE_MSA_UNARY(ftrunc_u.w);
         break;

      case FTRUNCUD:
         ASM_VOLATILE_MSA_UNARY(ftrunc_u.d);
         break;

      case FTRUNCSW:
         ASM_VOLATILE_MSA_UNARY(ftrunc_s.w);
         break;

      case FTRUNCSD:
         ASM_VOLATILE_MSA_UNARY(ftrunc_s.d);
         break;

      case FEXDOH:
         ASM_VOLATILE_MSA_BINARY(fexdo.h);
         break;

      case FEXDOW:
         ASM_VOLATILE_MSA_BINARY(fexdo.w);
         break;

      case FEXUPRW:
         ASM_VOLATILE_MSA_UNARY(fexupr.w);
         break;

      case FEXUPRD:
         ASM_VOLATILE_MSA_UNARY(fexupr.d);
         break;

      case FEXUPLW:
         ASM_VOLATILE_MSA_UNARY(fexupl.w);
         break;

      case FEXUPLD:
         ASM_VOLATILE_MSA_UNARY(fexupl.d);
         break;

      case FTQH:
         ASM_VOLATILE_MSA_BINARY(ftq.h);
         break;

      case FTQW:
         ASM_VOLATILE_MSA_BINARY(ftq.w);
         break;

      case FFQRD:
         ASM_VOLATILE_MSA_UNARY(ffqr.d);
         break;

      case FFQRW:
         ASM_VOLATILE_MSA_UNARY(ffqr.w);
         break;

      case FFQLD:
         ASM_VOLATILE_MSA_UNARY(ffql.d);
         break;

      case FFQLW:
         ASM_VOLATILE_MSA_UNARY(ffql.w);
         break;

      case FTINT_SD:
         ASM_VOLATILE_MSA_UNARY(ftint_s.d);
         break;

      case FTINT_SW:
         ASM_VOLATILE_MSA_UNARY(ftint_s.w);
         break;

      case FTINT_UD:
         ASM_VOLATILE_MSA_UNARY(ftint_u.d);
         break;

      case FTINT_UW:
         ASM_VOLATILE_MSA_UNARY(ftint_u.w);
         break;

      case FLOG2D:
         ASM_VOLATILE_MSA_UNARY(flog2.d);
         break;

      case FLOG2W:
         ASM_VOLATILE_MSA_UNARY(flog2.w);
         break;

      case FFINT_UD:
         ASM_VOLATILE_MSA_UNARY(ffint_u.d);
         break;

      case FFINT_UW:
         ASM_VOLATILE_MSA_UNARY(ffint_u.w);
         break;
   }
#endif
   return ret;
}
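
/* The MSA variant follows the same pattern as the FCSR helpers above.
   Illustrative call for a vector single-precision add of w4 and w6
   (the register indices are hypothetical):

      UInt new_msacsr = mips_dirtyhelper_calculate_MSACSR(
                           &guest_state, 4, 6, FADDW);
*/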

extern UInt mips_dirtyhelper_get_MSAIR() {
   UInt ret = 0;
/* GCC 4.8 and later support MIPS MSA. */
#if defined(__mips__) && (defined(__clang__) || (GCC_VERSION >= 408))
   __asm__ volatile(".set push      \n\t"
                    ".set mips32r2  \n\t"
                    ".set hardfloat \n\t"
                    ".set fp=64     \n\t"
                    ".set msa       \n\t"
                    ".set noreorder \n\t"
                    "cfcmsa %0, $0  \n\t"
                    ".set pop       \n\t"
                    : "=r" (ret) : : );
#endif
   return ret;
}

/*---------------------------------------------------------------*/
/*--- end                                guest_mips_helpers.c ---*/
/*---------------------------------------------------------------*/