2 /*---------------------------------------------------------------*/
3 /*--- begin guest_mips_helpers.c ---*/
4 /*---------------------------------------------------------------*/
7 This file is part of Valgrind, a dynamic binary instrumentation
10 Copyright (C) 2010-2017 RT-RK
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License as
14 published by the Free Software Foundation; either version 2 of the
15 License, or (at your option) any later version.
17 This program is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, see <http://www.gnu.org/licenses/>.
25 The GNU General Public License is contained in the file COPYING.
28 #include "libvex_basictypes.h"
29 #include "libvex_emnote.h"
30 #include "libvex_guest_mips32.h"
31 #include "libvex_guest_mips64.h"
32 #include "libvex_ir.h"
35 #include "main_util.h"
36 #include "main_globals.h"
37 #include "guest_generic_bb_to_IR.h"
38 #include "guest_mips_defs.h"
40 #if defined (__GNUC__)
41 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
46 /* This file contains helper functions for mips guest code. Calls to
47 these functions are generated by the back end.
/* Build a (guest-state-offset, size) descriptor for a field of the
   32-bit guest state; used to populate the 'alwaysDefd' tables that
   tell Memcheck which state is always defined. */
50 #define ALWAYSDEFD32(field) \
51 { offsetof(VexGuestMIPS32State, field), \
52 (sizeof ((VexGuestMIPS32State*)0)->field) }
/* Same as ALWAYSDEFD32, but for a field of the 64-bit guest state. */
54 #define ALWAYSDEFD64(field) \
55 { offsetof(VexGuestMIPS64State, field), \
56 (sizeof ((VexGuestMIPS64State*)0)->field) }
58 IRExpr
*guest_mips32_spechelper(const HChar
* function_name
, IRExpr
** args
,
59 IRStmt
** precedingStmts
, Int n_precedingStmts
)
64 IRExpr
*guest_mips64_spechelper ( const HChar
* function_name
, IRExpr
** args
,
65 IRStmt
** precedingStmts
,
66 Int n_precedingStmts
)
71 /* VISIBLE TO LIBVEX CLIENT */
72 void LibVEX_GuestMIPS32_initialise( /*OUT*/ VexGuestMIPS32State
* vex_state
)
74 vex_state
->guest_r0
= 0; /* Hardwired to 0 */
75 vex_state
->guest_r1
= 0; /* Assembler temporary */
76 vex_state
->guest_r2
= 0; /* Values for function returns ... */
77 vex_state
->guest_r3
= 0; /* ...and expression evaluation */
78 vex_state
->guest_r4
= 0; /* Function arguments */
79 vex_state
->guest_r5
= 0;
80 vex_state
->guest_r6
= 0;
81 vex_state
->guest_r7
= 0;
82 vex_state
->guest_r8
= 0; /* Temporaries */
83 vex_state
->guest_r9
= 0;
84 vex_state
->guest_r10
= 0;
85 vex_state
->guest_r11
= 0;
86 vex_state
->guest_r12
= 0;
87 vex_state
->guest_r13
= 0;
88 vex_state
->guest_r14
= 0;
89 vex_state
->guest_r15
= 0;
90 vex_state
->guest_r16
= 0; /* Saved temporaries */
91 vex_state
->guest_r17
= 0;
92 vex_state
->guest_r18
= 0;
93 vex_state
->guest_r19
= 0;
94 vex_state
->guest_r20
= 0;
95 vex_state
->guest_r21
= 0;
96 vex_state
->guest_r22
= 0;
97 vex_state
->guest_r23
= 0;
98 vex_state
->guest_r24
= 0; /* Temporaries */
99 vex_state
->guest_r25
= 0;
100 vex_state
->guest_r26
= 0; /* Reserved for OS kernel */
101 vex_state
->guest_r27
= 0;
102 vex_state
->guest_r28
= 0; /* Global pointer */
103 vex_state
->guest_r29
= 0; /* Stack pointer */
104 vex_state
->guest_r30
= 0; /* Frame pointer */
105 vex_state
->guest_r31
= 0; /* Return address */
106 vex_state
->guest_PC
= 0; /* Program counter */
107 vex_state
->guest_HI
= 0; /* Multiply and divide register higher result */
108 vex_state
->guest_LO
= 0; /* Multiply and divide register lower result */
111 vex_state
->guest_f0
= 0x7ff800007ff80000ULL
; /* Floting point GP registers */
112 vex_state
->guest_f1
= 0x7ff800007ff80000ULL
;
113 vex_state
->guest_f2
= 0x7ff800007ff80000ULL
;
114 vex_state
->guest_f3
= 0x7ff800007ff80000ULL
;
115 vex_state
->guest_f4
= 0x7ff800007ff80000ULL
;
116 vex_state
->guest_f5
= 0x7ff800007ff80000ULL
;
117 vex_state
->guest_f6
= 0x7ff800007ff80000ULL
;
118 vex_state
->guest_f7
= 0x7ff800007ff80000ULL
;
119 vex_state
->guest_f8
= 0x7ff800007ff80000ULL
;
120 vex_state
->guest_f9
= 0x7ff800007ff80000ULL
;
121 vex_state
->guest_f10
= 0x7ff800007ff80000ULL
;
122 vex_state
->guest_f11
= 0x7ff800007ff80000ULL
;
123 vex_state
->guest_f12
= 0x7ff800007ff80000ULL
;
124 vex_state
->guest_f13
= 0x7ff800007ff80000ULL
;
125 vex_state
->guest_f14
= 0x7ff800007ff80000ULL
;
126 vex_state
->guest_f15
= 0x7ff800007ff80000ULL
;
127 vex_state
->guest_f16
= 0x7ff800007ff80000ULL
;
128 vex_state
->guest_f17
= 0x7ff800007ff80000ULL
;
129 vex_state
->guest_f18
= 0x7ff800007ff80000ULL
;
130 vex_state
->guest_f19
= 0x7ff800007ff80000ULL
;
131 vex_state
->guest_f20
= 0x7ff800007ff80000ULL
;
132 vex_state
->guest_f21
= 0x7ff800007ff80000ULL
;
133 vex_state
->guest_f22
= 0x7ff800007ff80000ULL
;
134 vex_state
->guest_f23
= 0x7ff800007ff80000ULL
;
135 vex_state
->guest_f24
= 0x7ff800007ff80000ULL
;
136 vex_state
->guest_f25
= 0x7ff800007ff80000ULL
;
137 vex_state
->guest_f26
= 0x7ff800007ff80000ULL
;
138 vex_state
->guest_f27
= 0x7ff800007ff80000ULL
;
139 vex_state
->guest_f28
= 0x7ff800007ff80000ULL
;
140 vex_state
->guest_f29
= 0x7ff800007ff80000ULL
;
141 vex_state
->guest_f30
= 0x7ff800007ff80000ULL
;
142 vex_state
->guest_f31
= 0x7ff800007ff80000ULL
;
144 vex_state
->guest_FIR
= 0; /* FP implementation and revision register */
145 vex_state
->guest_FCCR
= 0; /* FP condition codes register */
146 vex_state
->guest_FEXR
= 0; /* FP exceptions register */
147 vex_state
->guest_FENR
= 0; /* FP enables register */
148 vex_state
->guest_FCSR
= 0; /* FP control/status register */
149 vex_state
->guest_ULR
= 0; /* TLS */
151 /* Various pseudo-regs mandated by Vex or Valgrind. */
152 /* Emulation notes */
153 vex_state
->guest_EMNOTE
= 0;
155 /* For clflush: record start and length of area to invalidate */
156 vex_state
->guest_CMSTART
= 0;
157 vex_state
->guest_CMLEN
= 0;
158 vex_state
->host_EvC_COUNTER
= 0;
159 vex_state
->host_EvC_FAILADDR
= 0;
161 /* Used to record the unredirected guest address at the start of
162 a translation whose start has been redirected. By reading
163 this pseudo-register shortly afterwards, the translation can
164 find out what the corresponding no-redirection address was.
165 Note, this is only set for wrap-style redirects, not for
166 replace-style ones. */
167 vex_state
->guest_NRADDR
= 0;
169 vex_state
->guest_COND
= 0;
171 vex_state
->guest_CP0_status
= 0;
172 vex_state
->guest_CP0_Config5
= 0;
174 vex_state
->guest_LLaddr
= 0xFFFFFFFF;
175 vex_state
->guest_LLdata
= 0;
177 /* MIPS32 DSP ASE(r2) specific registers */
178 vex_state
->guest_DSPControl
= 0; /* DSPControl register */
179 vex_state
->guest_ac0
= 0; /* Accumulator 0 */
180 vex_state
->guest_ac1
= 0; /* Accumulator 1 */
181 vex_state
->guest_ac2
= 0; /* Accumulator 2 */
182 vex_state
->guest_ac3
= 0; /* Accumulator 3 */
184 vex_state
->guest_w0
.w64
[0] = 0;
185 vex_state
->guest_w0
.w64
[1] = 0;
186 vex_state
->guest_w1
.w64
[0] = 0;
187 vex_state
->guest_w1
.w64
[1] = 0;
188 vex_state
->guest_w2
.w64
[0] = 0;
189 vex_state
->guest_w2
.w64
[1] = 0;
191 vex_state
->guest_IP_AT_SYSCALL
= 0;
194 void LibVEX_GuestMIPS64_initialise ( /*OUT*/ VexGuestMIPS64State
* vex_state
)
196 vex_state
->guest_r0
= 0; /* Hardwired to 0 */
197 vex_state
->guest_r1
= 0; /* Assembler temporary */
198 vex_state
->guest_r2
= 0; /* Values for function returns ... */
199 vex_state
->guest_r3
= 0;
200 vex_state
->guest_r4
= 0; /* Function arguments */
201 vex_state
->guest_r5
= 0;
202 vex_state
->guest_r6
= 0;
203 vex_state
->guest_r7
= 0;
204 vex_state
->guest_r8
= 0;
205 vex_state
->guest_r9
= 0;
206 vex_state
->guest_r10
= 0;
207 vex_state
->guest_r11
= 0;
208 vex_state
->guest_r12
= 0; /* Temporaries */
209 vex_state
->guest_r13
= 0;
210 vex_state
->guest_r14
= 0;
211 vex_state
->guest_r15
= 0;
212 vex_state
->guest_r16
= 0; /* Saved temporaries */
213 vex_state
->guest_r17
= 0;
214 vex_state
->guest_r18
= 0;
215 vex_state
->guest_r19
= 0;
216 vex_state
->guest_r20
= 0;
217 vex_state
->guest_r21
= 0;
218 vex_state
->guest_r22
= 0;
219 vex_state
->guest_r23
= 0;
220 vex_state
->guest_r24
= 0; /* Temporaries */
221 vex_state
->guest_r25
= 0;
222 vex_state
->guest_r26
= 0; /* Reserved for OS kernel */
223 vex_state
->guest_r27
= 0;
224 vex_state
->guest_r28
= 0; /* Global pointer */
225 vex_state
->guest_r29
= 0; /* Stack pointer */
226 vex_state
->guest_r30
= 0; /* Frame pointer */
227 vex_state
->guest_r31
= 0; /* Return address */
228 vex_state
->guest_PC
= 0; /* Program counter */
229 vex_state
->guest_HI
= 0; /* Multiply and divide register higher result */
230 vex_state
->guest_LO
= 0; /* Multiply and divide register lower result */
233 vex_state
->guest_f0
= 0x7ff800007ff80000ULL
; /* Floting point registers */
234 vex_state
->guest_f1
= 0x7ff800007ff80000ULL
;
235 vex_state
->guest_f2
= 0x7ff800007ff80000ULL
;
236 vex_state
->guest_f3
= 0x7ff800007ff80000ULL
;
237 vex_state
->guest_f4
= 0x7ff800007ff80000ULL
;
238 vex_state
->guest_f5
= 0x7ff800007ff80000ULL
;
239 vex_state
->guest_f6
= 0x7ff800007ff80000ULL
;
240 vex_state
->guest_f7
= 0x7ff800007ff80000ULL
;
241 vex_state
->guest_f8
= 0x7ff800007ff80000ULL
;
242 vex_state
->guest_f9
= 0x7ff800007ff80000ULL
;
243 vex_state
->guest_f10
= 0x7ff800007ff80000ULL
;
244 vex_state
->guest_f11
= 0x7ff800007ff80000ULL
;
245 vex_state
->guest_f12
= 0x7ff800007ff80000ULL
;
246 vex_state
->guest_f13
= 0x7ff800007ff80000ULL
;
247 vex_state
->guest_f14
= 0x7ff800007ff80000ULL
;
248 vex_state
->guest_f15
= 0x7ff800007ff80000ULL
;
249 vex_state
->guest_f16
= 0x7ff800007ff80000ULL
;
250 vex_state
->guest_f17
= 0x7ff800007ff80000ULL
;
251 vex_state
->guest_f18
= 0x7ff800007ff80000ULL
;
252 vex_state
->guest_f19
= 0x7ff800007ff80000ULL
;
253 vex_state
->guest_f20
= 0x7ff800007ff80000ULL
;
254 vex_state
->guest_f21
= 0x7ff800007ff80000ULL
;
255 vex_state
->guest_f22
= 0x7ff800007ff80000ULL
;
256 vex_state
->guest_f23
= 0x7ff800007ff80000ULL
;
257 vex_state
->guest_f24
= 0x7ff800007ff80000ULL
;
258 vex_state
->guest_f25
= 0x7ff800007ff80000ULL
;
259 vex_state
->guest_f26
= 0x7ff800007ff80000ULL
;
260 vex_state
->guest_f27
= 0x7ff800007ff80000ULL
;
261 vex_state
->guest_f28
= 0x7ff800007ff80000ULL
;
262 vex_state
->guest_f29
= 0x7ff800007ff80000ULL
;
263 vex_state
->guest_f30
= 0x7ff800007ff80000ULL
;
264 vex_state
->guest_f31
= 0x7ff800007ff80000ULL
;
266 vex_state
->guest_FIR
= 0; /* FP implementation and revision register */
267 vex_state
->guest_FCCR
= 0; /* FP condition codes register */
268 vex_state
->guest_FEXR
= 0; /* FP exceptions register */
269 vex_state
->guest_FENR
= 0; /* FP enables register */
270 vex_state
->guest_FCSR
= 0; /* FP control/status register */
272 vex_state
->guest_ULR
= 0;
274 /* Various pseudo-regs mandated by Vex or Valgrind. */
275 /* Emulation notes */
276 vex_state
->guest_EMNOTE
= 0;
278 /* For clflush: record start and length of area to invalidate */
279 vex_state
->guest_CMSTART
= 0;
280 vex_state
->guest_CMLEN
= 0;
281 vex_state
->host_EvC_COUNTER
= 0;
282 vex_state
->host_EvC_FAILADDR
= 0;
284 /* Used to record the unredirected guest address at the start of
285 a translation whose start has been redirected. By reading
286 this pseudo-register shortly afterwards, the translation can
287 find out what the corresponding no-redirection address was.
288 Note, this is only set for wrap-style redirects, not for
289 replace-style ones. */
290 vex_state
->guest_NRADDR
= 0;
292 vex_state
->guest_COND
= 0;
294 vex_state
->guest_CP0_status
= MIPS_CP0_STATUS_FR
;
296 vex_state
->guest_LLaddr
= 0xFFFFFFFFFFFFFFFFULL
;
297 vex_state
->guest_LLdata
= 0;
299 vex_state
->guest_IP_AT_SYSCALL
= 0;
301 vex_state
->guest_MSACSR
= 0;
304 /*-----------------------------------------------------------*/
305 /*--- Describing the mips guest state, for the benefit ---*/
306 /*--- of iropt and instrumenters. ---*/
307 /*-----------------------------------------------------------*/
309 /* Figure out if any part of the guest state contained in minoff
310 .. maxoff requires precise memory exceptions. If in doubt return
311 True (but this generates significantly slower code).
313 We enforce precise exns for guest SP, PC.
315 Only SP is needed in mode VexRegUpdSpAtMemAccess.
317 Bool
guest_mips32_state_requires_precise_mem_exns (
318 Int minoff
, Int maxoff
, VexRegisterUpdates pxControl
321 Int sp_min
= offsetof(VexGuestMIPS32State
, guest_r29
);
322 Int sp_max
= sp_min
+ 4 - 1;
323 Int pc_min
= offsetof(VexGuestMIPS32State
, guest_PC
);
324 Int pc_max
= pc_min
+ 4 - 1;
326 if (maxoff
< sp_min
|| minoff
> sp_max
) {
327 /* no overlap with sp */
328 if (pxControl
== VexRegUpdSpAtMemAccess
)
329 return False
; /* We only need to check stack pointer. */
334 if (maxoff
< pc_min
|| minoff
> pc_max
) {
335 /* no overlap with pc */
340 /* We appear to need precise updates of R11 in order to get proper
341 stacktraces from non-optimised code. */
342 Int fp_min
= offsetof(VexGuestMIPS32State
, guest_r30
);
343 Int fp_max
= fp_min
+ 4 - 1;
345 if (maxoff
< fp_min
|| minoff
> fp_max
) {
346 /* no overlap with fp */
354 Bool
guest_mips64_state_requires_precise_mem_exns (
355 Int minoff
, Int maxoff
, VexRegisterUpdates pxControl
358 Int sp_min
= offsetof(VexGuestMIPS64State
, guest_r29
);
359 Int sp_max
= sp_min
+ 8 - 1;
360 Int pc_min
= offsetof(VexGuestMIPS64State
, guest_PC
);
361 Int pc_max
= pc_min
+ 8 - 1;
363 if ( maxoff
< sp_min
|| minoff
> sp_max
) {
364 /* no overlap with sp */
365 if (pxControl
== VexRegUpdSpAtMemAccess
)
366 return False
; /* We only need to check stack pointer. */
371 if ( maxoff
< pc_min
|| minoff
> pc_max
) {
372 /* no overlap with pc */
377 Int fp_min
= offsetof(VexGuestMIPS64State
, guest_r30
);
378 Int fp_max
= fp_min
+ 8 - 1;
380 if ( maxoff
< fp_min
|| minoff
> fp_max
) {
381 /* no overlap with fp */
389 VexGuestLayout mips32Guest_layout
= {
390 /* Total size of the guest state, in bytes. */
391 .total_sizeB
= sizeof(VexGuestMIPS32State
),
392 /* Describe the stack pointer. */
393 .offset_SP
= offsetof(VexGuestMIPS32State
, guest_r29
),
395 /* Describe the frame pointer. */
396 .offset_FP
= offsetof(VexGuestMIPS32State
, guest_r30
),
398 /* Describe the instruction pointer. */
399 .offset_IP
= offsetof(VexGuestMIPS32State
, guest_PC
),
401 /* Describe any sections to be regarded by Memcheck as
406 /* 0 */ ALWAYSDEFD32(guest_r0
),
407 /* 1 */ ALWAYSDEFD32(guest_r1
),
408 /* 2 */ ALWAYSDEFD32(guest_EMNOTE
),
409 /* 3 */ ALWAYSDEFD32(guest_CMSTART
),
410 /* 4 */ ALWAYSDEFD32(guest_CMLEN
),
411 /* 5 */ ALWAYSDEFD32(guest_r29
),
412 /* 6 */ ALWAYSDEFD32(guest_r31
),
413 /* 7 */ ALWAYSDEFD32(guest_ULR
)
417 VexGuestLayout mips64Guest_layout
= {
418 /* Total size of the guest state, in bytes. */
419 .total_sizeB
= sizeof(VexGuestMIPS64State
),
420 /* Describe the stack pointer. */
421 .offset_SP
= offsetof(VexGuestMIPS64State
, guest_r29
),
423 /* Describe the frame pointer. */
424 .offset_FP
= offsetof(VexGuestMIPS64State
, guest_r30
),
426 /* Describe the instruction pointer. */
427 .offset_IP
= offsetof(VexGuestMIPS64State
, guest_PC
),
429 /* Describe any sections to be regarded by Memcheck as
434 /* 0 */ ALWAYSDEFD64 (guest_r0
),
435 /* 1 */ ALWAYSDEFD64 (guest_EMNOTE
),
436 /* 2 */ ALWAYSDEFD64 (guest_CMSTART
),
437 /* 3 */ ALWAYSDEFD64 (guest_CMLEN
),
438 /* 4 */ ALWAYSDEFD64 (guest_r29
),
439 /* 5 */ ALWAYSDEFD64 (guest_r31
),
440 /* 6 */ ALWAYSDEFD64 (guest_ULR
)
444 #define ASM_VOLATILE_RDHWR(opcode) \
445 __asm__ __volatile__(".word 0x7C02003B | "#opcode" << 11 \n\t" \
449 HWord
mips_dirtyhelper_rdhwr ( UInt rd
)
451 #if defined(__mips__)
452 register HWord x
__asm__("v0") = 0;
455 case 0: /* x = CPUNum() */
456 ASM_VOLATILE_RDHWR(0); /* rdhwr v0, $0 */
459 case 1: /* x = SYNCI_Step() */
460 ASM_VOLATILE_RDHWR(1); /* rdhwr v0, $1 */
463 case 2: /* x = CC() */
464 ASM_VOLATILE_RDHWR(2); /* rdhwr v0, $2 */
467 case 3: /* x = CCRes() */
468 ASM_VOLATILE_RDHWR(3); /* rdhwr v0, $3 */
471 case 31: /* x = CVMX_get_cycles() */
472 ASM_VOLATILE_RDHWR(31); /* rdhwr v0, $31 */
485 #define ASM_VOLATILE_UNARY32(inst) \
486 __asm__ volatile(".set push" "\n\t" \
487 ".set hardfloat" "\n\t" \
488 "cfc1 $8, $31" "\n\t" \
489 "ctc1 %2, $31" "\n\t" \
490 "mtc1 %1, $f20" "\n\t" \
491 #inst" $f20, $f20" "\n\t" \
492 "cfc1 %0, $31" "\n\t" \
493 "ctc1 $8, $31" "\n\t" \
496 : "r" (loFsVal), "r" (fcsr) \
500 #define ASM_VOLATILE_UNARY32_DOUBLE(inst) \
501 __asm__ volatile(".set push" "\n\t" \
502 ".set hardfloat" "\n\t" \
503 "cfc1 $8, $31" "\n\t" \
504 "ctc1 %2, $31" "\n\t" \
505 "ldc1 $f20, 0(%1)" "\n\t" \
506 #inst" $f20, $f20" "\n\t" \
507 "cfc1 %0, $31" "\n\t" \
508 "ctc1 $8, $31" "\n\t" \
511 : "r" (&fsVal), "r" (fcsr) \
512 : "$8", "$f20", "$f21" \
515 #define ASM_VOLATILE_UNARY64(inst) \
516 __asm__ volatile(".set push" "\n\t" \
517 ".set hardfloat" "\n\t" \
518 ".set fp=64" "\n\t" \
519 "cfc1 $8, $31" "\n\t" \
520 "ctc1 %2, $31" "\n\t" \
521 "ldc1 $f24, 0(%1)" "\n\t" \
522 #inst" $f24, $f24" "\n\t" \
523 "cfc1 %0, $31" "\n\t" \
524 "ctc1 $8, $31" "\n\t" \
527 : "r" (&(addr[fs])), "r" (fcsr) \
531 #define ASM_VOLATILE_MSA_UNARY(inst) \
532 __asm__ volatile(".set push" "\n\t" \
533 ".set mips32r2" "\n\t" \
534 ".set hardfloat" "\n\t" \
535 ".set fp=64" "\n\t" \
537 ".set noreorder" "\n\t" \
538 "cfcmsa $t0, $1" "\n\t" \
539 "ctcmsa $1, %2" "\n\t" \
540 "ld.b $w24, 0(%1)" "\n\t" \
541 #inst" $w24, $w24" "\n\t" \
542 "cfcmsa %0, $1" "\n\t" \
543 "ctcmsa $1, $t0" "\n\t" \
546 : "r" (&(addr[ws])), "r" (msacsr) \
550 #define ASM_VOLATILE_BINARY32(inst) \
551 __asm__ volatile(".set push" "\n\t" \
552 ".set hardfloat" "\n\t" \
553 "cfc1 $8, $31" "\n\t" \
554 "ctc1 %3, $31" "\n\t" \
555 "mtc1 %1, $f20" "\n\t" \
556 "mtc1 %2, $f22" "\n\t" \
557 #inst" $f20, $f20, $f22" "\n\t" \
558 "cfc1 %0, $31" "\n\t" \
559 "ctc1 $8, $31" "\n\t" \
562 : "r" (loFsVal), "r" (loFtVal), "r" (fcsr) \
563 : "$8", "$f20", "$f22" \
566 #define ASM_VOLATILE_BINARY32_DOUBLE(inst) \
567 __asm__ volatile(".set push" "\n\t" \
568 ".set hardfloat" "\n\t" \
569 "cfc1 $8, $31" "\n\t" \
570 "ctc1 %3, $31" "\n\t" \
571 "ldc1 $f20, 0(%1)" "\n\t" \
572 "ldc1 $f22, 0(%2)" "\n\t" \
573 #inst" $f20, $f20, $f22" "\n\t" \
574 "cfc1 %0, $31" "\n\t" \
575 "ctc1 $8, $31" "\n\t" \
578 : "r" (&fsVal), "r" (&ftVal), "r" (fcsr) \
579 : "$8", "$f20", "$f21", "$f22", "$f23" \
582 #define ASM_VOLATILE_BINARY64(inst) \
583 __asm__ volatile(".set push" "\n\t" \
584 ".set hardfloat" "\n\t" \
585 "cfc1 $8, $31" "\n\t" \
586 "ctc1 %3, $31" "\n\t" \
587 "ldc1 $f24, 0(%1)" "\n\t" \
588 "ldc1 $f26, 0(%2)" "\n\t" \
589 #inst" $f24, $f24, $f26" "\n\t" \
590 "cfc1 %0, $31" "\n\t" \
591 "ctc1 $8, $31" "\n\t" \
594 : "r" (&(addr[fs])), "r" (&(addr[ft])), "r" (fcsr) \
595 : "$8", "$f24", "$f26" \
598 #define ASM_VOLATILE_MSA_BINARY(inst) \
599 __asm__ volatile(".set push" "\n\t" \
600 ".set mips32r2" "\n\t" \
601 ".set hardfloat" "\n\t" \
602 ".set fp=64" "\n\t" \
604 "cfcmsa $t0, $1" "\n\t" \
605 "ctcmsa $1, %3" "\n\t" \
606 "ld.b $w24, 0(%1)" "\n\t" \
607 "ld.b $w26, 0(%2)" "\n\t" \
608 #inst" $w24, $w24, $w26" "\n\t" \
609 "cfcmsa %0, $1" "\n\t" \
610 "ctcmsa $1, $t0" "\n\t" \
613 : "r" (&(addr[ws])), "r" (&(addr[wt])), "r" (msacsr)\
617 /* TODO: Add cases for all fpu instructions because all fpu instructions are
618 change the value of FCSR register. */
619 extern UInt
mips_dirtyhelper_calculate_FCSR_fp32 ( void* gs
, UInt fs
, UInt ft
,
623 #if defined(__mips__)
624 VexGuestMIPS32State
* guest_state
= (VexGuestMIPS32State
*)gs
;
625 UInt loFsVal
, hiFsVal
, loFtVal
, hiFtVal
;
626 #if defined (_MIPSEL)
627 ULong
*addr
= (ULong
*)&guest_state
->guest_f0
;
628 loFsVal
= (UInt
)addr
[fs
];
629 hiFsVal
= (UInt
)addr
[fs
+1];
630 loFtVal
= (UInt
)addr
[ft
];
631 hiFtVal
= (UInt
)addr
[ft
+1];
632 #elif defined (_MIPSEB)
633 UInt
*addr
= (UInt
*)&guest_state
->guest_f0
;
634 loFsVal
= (UInt
)addr
[fs
*2];
635 hiFsVal
= (UInt
)addr
[fs
*2+2];
636 loFtVal
= (UInt
)addr
[ft
*2];
637 hiFtVal
= (UInt
)addr
[ft
*2+2];
639 ULong fsVal
= ((ULong
) hiFsVal
) << 32 | loFsVal
;
640 ULong ftVal
= ((ULong
) hiFtVal
) << 32 | loFtVal
;
641 UInt fcsr
= guest_state
->guest_FCSR
;
644 ASM_VOLATILE_UNARY32_DOUBLE(round
.w
.d
)
647 ASM_VOLATILE_UNARY32(floor
.w
.s
)
650 ASM_VOLATILE_UNARY32_DOUBLE(floor
.w
.d
)
653 ASM_VOLATILE_UNARY32(trunc
.w
.s
)
656 ASM_VOLATILE_UNARY32_DOUBLE(trunc
.w
.d
)
659 ASM_VOLATILE_UNARY32(ceil
.w
.s
)
662 ASM_VOLATILE_UNARY32_DOUBLE(ceil
.w
.d
)
665 ASM_VOLATILE_UNARY32(cvt
.d
.s
)
668 ASM_VOLATILE_UNARY32(cvt
.d
.w
)
671 ASM_VOLATILE_UNARY32(cvt
.s
.w
)
674 ASM_VOLATILE_UNARY32_DOUBLE(cvt
.s
.d
)
677 ASM_VOLATILE_UNARY32(cvt
.w
.s
)
680 ASM_VOLATILE_UNARY32_DOUBLE(cvt
.w
.d
)
683 ASM_VOLATILE_UNARY32(round
.w
.s
)
686 ASM_VOLATILE_BINARY32(add
.s
)
689 ASM_VOLATILE_BINARY32_DOUBLE(add
.d
)
692 ASM_VOLATILE_BINARY32(sub
.s
)
695 ASM_VOLATILE_BINARY32_DOUBLE(sub
.d
)
698 ASM_VOLATILE_BINARY32(div
.s
)
708 /* TODO: Add cases for all fpu instructions because all fpu instructions are
709 change the value of FCSR register. */
710 extern UInt
mips_dirtyhelper_calculate_FCSR_fp64 ( void* gs
, UInt fs
, UInt ft
,
714 #if defined(__mips__) && ((__mips == 64) || \
715 (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)))
716 #if defined(VGA_mips32)
717 VexGuestMIPS32State
* guest_state
= (VexGuestMIPS32State
*)gs
;
719 VexGuestMIPS64State
* guest_state
= (VexGuestMIPS64State
*)gs
;
721 ULong
*addr
= (ULong
*)&guest_state
->guest_f0
;
722 UInt fcsr
= guest_state
->guest_FCSR
;
725 ASM_VOLATILE_UNARY64(round
.w
.d
)
728 ASM_VOLATILE_UNARY64(floor
.w
.s
)
731 ASM_VOLATILE_UNARY64(floor
.w
.d
)
734 ASM_VOLATILE_UNARY64(trunc
.w
.s
)
737 ASM_VOLATILE_UNARY64(trunc
.w
.d
)
740 ASM_VOLATILE_UNARY64(ceil
.w
.s
)
743 ASM_VOLATILE_UNARY64(ceil
.w
.d
)
746 ASM_VOLATILE_UNARY64(cvt
.d
.s
)
749 ASM_VOLATILE_UNARY64(cvt
.d
.w
)
752 ASM_VOLATILE_UNARY64(cvt
.s
.w
)
755 ASM_VOLATILE_UNARY64(cvt
.s
.d
)
758 ASM_VOLATILE_UNARY64(cvt
.w
.s
)
761 ASM_VOLATILE_UNARY64(cvt
.w
.d
)
764 ASM_VOLATILE_UNARY64(round
.w
.s
)
767 ASM_VOLATILE_UNARY64(ceil
.l
.s
)
770 ASM_VOLATILE_UNARY64(ceil
.l
.d
)
773 ASM_VOLATILE_UNARY64(cvt
.d
.l
)
776 ASM_VOLATILE_UNARY64(cvt
.l
.s
)
779 ASM_VOLATILE_UNARY64(cvt
.l
.d
)
782 ASM_VOLATILE_UNARY64(cvt
.s
.l
)
785 ASM_VOLATILE_UNARY64(floor
.l
.s
)
788 ASM_VOLATILE_UNARY64(floor
.l
.d
)
791 ASM_VOLATILE_UNARY64(round
.l
.s
)
794 ASM_VOLATILE_UNARY64(round
.l
.d
)
797 ASM_VOLATILE_UNARY64(trunc
.l
.s
)
800 ASM_VOLATILE_UNARY64(trunc
.l
.d
)
803 ASM_VOLATILE_BINARY64(add
.s
)
806 ASM_VOLATILE_BINARY64(add
.d
)
809 ASM_VOLATILE_BINARY64(sub
.s
)
812 ASM_VOLATILE_BINARY64(sub
.d
)
815 ASM_VOLATILE_BINARY64(div
.s
)
817 #if defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
819 ASM_VOLATILE_UNARY64(rint
.s
)
822 ASM_VOLATILE_UNARY64(rint
.d
)
825 ASM_VOLATILE_BINARY64(max
.s
)
828 ASM_VOLATILE_BINARY64(max
.d
)
831 ASM_VOLATILE_BINARY64(min
.s
)
834 ASM_VOLATILE_BINARY64(min
.d
)
837 ASM_VOLATILE_BINARY64(maxa
.s
)
840 ASM_VOLATILE_BINARY64(maxa
.d
)
843 ASM_VOLATILE_BINARY64(mina
.s
)
846 ASM_VOLATILE_BINARY64(mina
.d
)
849 ASM_VOLATILE_BINARY64(cmp
.af
.s
)
852 ASM_VOLATILE_BINARY64(cmp
.af
.d
)
855 ASM_VOLATILE_BINARY64(cmp
.saf
.s
)
858 ASM_VOLATILE_BINARY64(cmp
.saf
.d
)
870 extern UInt
mips_dirtyhelper_calculate_MSACSR ( void* gs
, UInt ws
, UInt wt
,
873 /* GCC 4.8 and later support MIPS MSA. */
874 #if defined(__mips__) && (defined(__clang__) || (GCC_VERSION >= 408))
875 #if defined(VGA_mips32)
876 VexGuestMIPS32State
* guest_state
= (VexGuestMIPS32State
*)gs
;
878 VexGuestMIPS64State
* guest_state
= (VexGuestMIPS64State
*)gs
;
880 V128
*addr
= (V128
*)&guest_state
->guest_w0
;
881 UInt msacsr
= guest_state
->guest_MSACSR
;
885 ASM_VOLATILE_MSA_BINARY(fadd
.w
)
889 ASM_VOLATILE_MSA_BINARY(fadd
.d
)
893 ASM_VOLATILE_MSA_BINARY(fsub
.w
);
897 ASM_VOLATILE_MSA_BINARY(fsub
.d
);
901 ASM_VOLATILE_MSA_BINARY(fmul
.w
);
905 ASM_VOLATILE_MSA_BINARY(fmul
.d
);
909 ASM_VOLATILE_MSA_BINARY(fdiv
.w
);
913 ASM_VOLATILE_MSA_BINARY(fdiv
.d
);
917 ASM_VOLATILE_MSA_BINARY(fmadd
.w
);
921 ASM_VOLATILE_MSA_BINARY(fmadd
.d
);
925 ASM_VOLATILE_MSA_BINARY(fcaf
.w
);
929 ASM_VOLATILE_MSA_BINARY(fcaf
.d
);
933 ASM_VOLATILE_MSA_BINARY(fsaf
.w
);
937 ASM_VOLATILE_MSA_BINARY(fsaf
.d
);
941 ASM_VOLATILE_MSA_BINARY(fceq
.w
);
945 ASM_VOLATILE_MSA_BINARY(fceq
.d
);
949 ASM_VOLATILE_MSA_BINARY(fseq
.w
);
953 ASM_VOLATILE_MSA_BINARY(fseq
.d
);
957 ASM_VOLATILE_MSA_BINARY(fclt
.w
);
961 ASM_VOLATILE_MSA_BINARY(fclt
.d
);
965 ASM_VOLATILE_MSA_BINARY(fslt
.w
);
969 ASM_VOLATILE_MSA_BINARY(fslt
.d
);
973 ASM_VOLATILE_MSA_BINARY(fcle
.w
);
977 ASM_VOLATILE_MSA_BINARY(fcle
.d
);
981 ASM_VOLATILE_MSA_BINARY(fsle
.w
);
985 ASM_VOLATILE_MSA_BINARY(fsle
.d
);
989 ASM_VOLATILE_MSA_BINARY(fcne
.w
);
993 ASM_VOLATILE_MSA_BINARY(fcne
.d
);
997 ASM_VOLATILE_MSA_BINARY(fsne
.w
);
1001 ASM_VOLATILE_MSA_BINARY(fsne
.d
);
1005 ASM_VOLATILE_MSA_BINARY(fexp2
.w
);
1009 ASM_VOLATILE_MSA_BINARY(fexp2
.d
);
1013 ASM_VOLATILE_MSA_BINARY(fmin
.w
);
1017 ASM_VOLATILE_MSA_BINARY(fmin
.d
);
1021 ASM_VOLATILE_MSA_BINARY(fmin_a
.w
);
1025 ASM_VOLATILE_MSA_BINARY(fmin_a
.d
);
1029 ASM_VOLATILE_MSA_BINARY(fcun
.w
);
1033 ASM_VOLATILE_MSA_BINARY(fcun
.d
);
1037 ASM_VOLATILE_MSA_BINARY(fsun
.w
);
1041 ASM_VOLATILE_MSA_BINARY(fsun
.d
);
1045 ASM_VOLATILE_MSA_BINARY(fcor
.w
);
1049 ASM_VOLATILE_MSA_BINARY(fcor
.d
);
1053 ASM_VOLATILE_MSA_BINARY(fsor
.w
);
1057 ASM_VOLATILE_MSA_BINARY(fsor
.d
);
1061 ASM_VOLATILE_MSA_BINARY(fcueq
.w
);
1065 ASM_VOLATILE_MSA_BINARY(fcueq
.d
);
1069 ASM_VOLATILE_MSA_BINARY(fsueq
.w
);
1073 ASM_VOLATILE_MSA_BINARY(fsueq
.d
);
1077 ASM_VOLATILE_MSA_BINARY(fcune
.w
);
1081 ASM_VOLATILE_MSA_BINARY(fcune
.d
);
1085 ASM_VOLATILE_MSA_BINARY(fsune
.w
);
1089 ASM_VOLATILE_MSA_BINARY(fsune
.d
);
1093 ASM_VOLATILE_MSA_BINARY(fcule
.w
);
1097 ASM_VOLATILE_MSA_BINARY(fcule
.d
);
1101 ASM_VOLATILE_MSA_BINARY(fsule
.w
);
1105 ASM_VOLATILE_MSA_BINARY(fsule
.d
);
1109 ASM_VOLATILE_MSA_BINARY(fcult
.w
);
1113 ASM_VOLATILE_MSA_BINARY(fcult
.d
);
1117 ASM_VOLATILE_MSA_BINARY(fsult
.w
);
1121 ASM_VOLATILE_MSA_BINARY(fsult
.d
);
1125 ASM_VOLATILE_MSA_BINARY(fmax
.w
);
1129 ASM_VOLATILE_MSA_BINARY(fmax
.d
);
1133 ASM_VOLATILE_MSA_BINARY(fmax_a
.w
);
1137 ASM_VOLATILE_MSA_BINARY(fmax_a
.d
);
1141 ASM_VOLATILE_MSA_UNARY(ffint_s
.w
);
1145 ASM_VOLATILE_MSA_UNARY(ffint_s
.d
);
1149 ASM_VOLATILE_MSA_UNARY(frcp
.w
);
1153 ASM_VOLATILE_MSA_UNARY(frcp
.d
);
1157 ASM_VOLATILE_MSA_UNARY(frsqrt
.w
);
1161 ASM_VOLATILE_MSA_UNARY(frsqrt
.d
);
1165 ASM_VOLATILE_MSA_UNARY(fsqrt
.w
);
1169 ASM_VOLATILE_MSA_UNARY(fsqrt
.d
);
1173 ASM_VOLATILE_MSA_UNARY(frint
.w
);
1177 ASM_VOLATILE_MSA_UNARY(frint
.d
);
1181 ASM_VOLATILE_MSA_UNARY(ftrunc_u
.w
);
1185 ASM_VOLATILE_MSA_UNARY(ftrunc_u
.d
);
1189 ASM_VOLATILE_MSA_UNARY(ftrunc_s
.w
);
1193 ASM_VOLATILE_MSA_UNARY(ftrunc_s
.d
);
1197 ASM_VOLATILE_MSA_BINARY(fexdo
.h
);
1201 ASM_VOLATILE_MSA_BINARY(fexdo
.w
);
1205 ASM_VOLATILE_MSA_UNARY(fexupr
.w
);
1209 ASM_VOLATILE_MSA_UNARY(fexupr
.d
);
1213 ASM_VOLATILE_MSA_UNARY(fexupl
.w
);
1217 ASM_VOLATILE_MSA_UNARY(fexupl
.d
);
1221 ASM_VOLATILE_MSA_BINARY(ftq
.h
);
1225 ASM_VOLATILE_MSA_BINARY(ftq
.w
);
1229 ASM_VOLATILE_MSA_UNARY(ffqr
.d
);
1233 ASM_VOLATILE_MSA_UNARY(ffqr
.w
);
1237 ASM_VOLATILE_MSA_UNARY(ffql
.d
);
1241 ASM_VOLATILE_MSA_UNARY(ffql
.w
);
1245 ASM_VOLATILE_MSA_UNARY(ftint_s
.d
);
1249 ASM_VOLATILE_MSA_UNARY(ftint_s
.w
);
1253 ASM_VOLATILE_MSA_UNARY(ftint_u
.d
);
1257 ASM_VOLATILE_MSA_UNARY(ftint_u
.w
);
1261 ASM_VOLATILE_MSA_UNARY(flog2
.d
);
1265 ASM_VOLATILE_MSA_UNARY(flog2
.w
);
1269 ASM_VOLATILE_MSA_UNARY(ffint_u
.d
);
1273 ASM_VOLATILE_MSA_UNARY(ffint_u
.w
);
1281 extern UInt
mips_dirtyhelper_get_MSAIR(void) {
1283 /* GCC 4.8 and later support MIPS MSA. */
1284 #if defined(__mips__) && (defined(__clang__) || (GCC_VERSION >= 408))
1285 __asm__
volatile(".set push \n\t"
1286 ".set mips32r2 \n\t"
1287 ".set hardfloat \n\t"
1290 ".set noreorder \n\t"
1291 "cfcmsa %0, $0 \n\t"
1301 /*---------------------------------------------------------------*/
1302 /*--- end guest_mips_helpers.c ---*/
1303 /*---------------------------------------------------------------*/