2 /*---------------------------------------------------------------*/
3 /*--- begin guest_mips_helpers.c ---*/
4 /*---------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2010-2017 RT-RK
      mips-valgrind@rt-rk.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
31 #include "libvex_basictypes.h"
32 #include "libvex_emnote.h"
33 #include "libvex_guest_mips32.h"
34 #include "libvex_guest_mips64.h"
35 #include "libvex_ir.h"
38 #include "main_util.h"
39 #include "main_globals.h"
40 #include "guest_generic_bb_to_IR.h"
41 #include "guest_mips_defs.h"
/* Single comparable GCC version number, e.g. 408 for GCC 4.8.  Used
   below to gate MSA support (needs GCC >= 4.8 or clang).  The #endif
   was missing, leaving the conditional unterminated. */
#if defined (__GNUC__)
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#endif
/* This file contains helper functions for mips guest code.  Calls to
   these functions are generated by the back end. */
/* Build an { offset, size } descriptor for a guest-state field that
   Memcheck should regard as always defined. */
#define ALWAYSDEFD32(field)                         \
   { offsetof(VexGuestMIPS32State, field),          \
     (sizeof ((VexGuestMIPS32State*)0)->field) }

#define ALWAYSDEFD64(field)                         \
   { offsetof(VexGuestMIPS64State, field),          \
     (sizeof ((VexGuestMIPS64State*)0)->field) }
61 IRExpr
*guest_mips32_spechelper(const HChar
* function_name
, IRExpr
** args
,
62 IRStmt
** precedingStmts
, Int n_precedingStmts
)
67 IRExpr
*guest_mips64_spechelper ( const HChar
* function_name
, IRExpr
** args
,
68 IRStmt
** precedingStmts
,
69 Int n_precedingStmts
)
74 /* VISIBLE TO LIBVEX CLIENT */
75 void LibVEX_GuestMIPS32_initialise( /*OUT*/ VexGuestMIPS32State
* vex_state
)
77 vex_state
->guest_r0
= 0; /* Hardwired to 0 */
78 vex_state
->guest_r1
= 0; /* Assembler temporary */
79 vex_state
->guest_r2
= 0; /* Values for function returns ... */
80 vex_state
->guest_r3
= 0; /* ...and expression evaluation */
81 vex_state
->guest_r4
= 0; /* Function arguments */
82 vex_state
->guest_r5
= 0;
83 vex_state
->guest_r6
= 0;
84 vex_state
->guest_r7
= 0;
85 vex_state
->guest_r8
= 0; /* Temporaries */
86 vex_state
->guest_r9
= 0;
87 vex_state
->guest_r10
= 0;
88 vex_state
->guest_r11
= 0;
89 vex_state
->guest_r12
= 0;
90 vex_state
->guest_r13
= 0;
91 vex_state
->guest_r14
= 0;
92 vex_state
->guest_r15
= 0;
93 vex_state
->guest_r16
= 0; /* Saved temporaries */
94 vex_state
->guest_r17
= 0;
95 vex_state
->guest_r18
= 0;
96 vex_state
->guest_r19
= 0;
97 vex_state
->guest_r20
= 0;
98 vex_state
->guest_r21
= 0;
99 vex_state
->guest_r22
= 0;
100 vex_state
->guest_r23
= 0;
101 vex_state
->guest_r24
= 0; /* Temporaries */
102 vex_state
->guest_r25
= 0;
103 vex_state
->guest_r26
= 0; /* Reserved for OS kernel */
104 vex_state
->guest_r27
= 0;
105 vex_state
->guest_r28
= 0; /* Global pointer */
106 vex_state
->guest_r29
= 0; /* Stack pointer */
107 vex_state
->guest_r30
= 0; /* Frame pointer */
108 vex_state
->guest_r31
= 0; /* Return address */
109 vex_state
->guest_PC
= 0; /* Program counter */
110 vex_state
->guest_HI
= 0; /* Multiply and divide register higher result */
111 vex_state
->guest_LO
= 0; /* Multiply and divide register lower result */
114 vex_state
->guest_f0
= 0x7ff800007ff80000ULL
; /* Floting point GP registers */
115 vex_state
->guest_f1
= 0x7ff800007ff80000ULL
;
116 vex_state
->guest_f2
= 0x7ff800007ff80000ULL
;
117 vex_state
->guest_f3
= 0x7ff800007ff80000ULL
;
118 vex_state
->guest_f4
= 0x7ff800007ff80000ULL
;
119 vex_state
->guest_f5
= 0x7ff800007ff80000ULL
;
120 vex_state
->guest_f6
= 0x7ff800007ff80000ULL
;
121 vex_state
->guest_f7
= 0x7ff800007ff80000ULL
;
122 vex_state
->guest_f8
= 0x7ff800007ff80000ULL
;
123 vex_state
->guest_f9
= 0x7ff800007ff80000ULL
;
124 vex_state
->guest_f10
= 0x7ff800007ff80000ULL
;
125 vex_state
->guest_f11
= 0x7ff800007ff80000ULL
;
126 vex_state
->guest_f12
= 0x7ff800007ff80000ULL
;
127 vex_state
->guest_f13
= 0x7ff800007ff80000ULL
;
128 vex_state
->guest_f14
= 0x7ff800007ff80000ULL
;
129 vex_state
->guest_f15
= 0x7ff800007ff80000ULL
;
130 vex_state
->guest_f16
= 0x7ff800007ff80000ULL
;
131 vex_state
->guest_f17
= 0x7ff800007ff80000ULL
;
132 vex_state
->guest_f18
= 0x7ff800007ff80000ULL
;
133 vex_state
->guest_f19
= 0x7ff800007ff80000ULL
;
134 vex_state
->guest_f20
= 0x7ff800007ff80000ULL
;
135 vex_state
->guest_f21
= 0x7ff800007ff80000ULL
;
136 vex_state
->guest_f22
= 0x7ff800007ff80000ULL
;
137 vex_state
->guest_f23
= 0x7ff800007ff80000ULL
;
138 vex_state
->guest_f24
= 0x7ff800007ff80000ULL
;
139 vex_state
->guest_f25
= 0x7ff800007ff80000ULL
;
140 vex_state
->guest_f26
= 0x7ff800007ff80000ULL
;
141 vex_state
->guest_f27
= 0x7ff800007ff80000ULL
;
142 vex_state
->guest_f28
= 0x7ff800007ff80000ULL
;
143 vex_state
->guest_f29
= 0x7ff800007ff80000ULL
;
144 vex_state
->guest_f30
= 0x7ff800007ff80000ULL
;
145 vex_state
->guest_f31
= 0x7ff800007ff80000ULL
;
147 vex_state
->guest_FIR
= 0; /* FP implementation and revision register */
148 vex_state
->guest_FCCR
= 0; /* FP condition codes register */
149 vex_state
->guest_FEXR
= 0; /* FP exceptions register */
150 vex_state
->guest_FENR
= 0; /* FP enables register */
151 vex_state
->guest_FCSR
= 0; /* FP control/status register */
152 vex_state
->guest_ULR
= 0; /* TLS */
154 /* Various pseudo-regs mandated by Vex or Valgrind. */
155 /* Emulation notes */
156 vex_state
->guest_EMNOTE
= 0;
158 /* For clflush: record start and length of area to invalidate */
159 vex_state
->guest_CMSTART
= 0;
160 vex_state
->guest_CMLEN
= 0;
161 vex_state
->host_EvC_COUNTER
= 0;
162 vex_state
->host_EvC_FAILADDR
= 0;
164 /* Used to record the unredirected guest address at the start of
165 a translation whose start has been redirected. By reading
166 this pseudo-register shortly afterwards, the translation can
167 find out what the corresponding no-redirection address was.
168 Note, this is only set for wrap-style redirects, not for
169 replace-style ones. */
170 vex_state
->guest_NRADDR
= 0;
172 vex_state
->guest_COND
= 0;
174 vex_state
->guest_CP0_status
= 0;
175 vex_state
->guest_CP0_Config5
= 0;
177 vex_state
->guest_LLaddr
= 0xFFFFFFFF;
178 vex_state
->guest_LLdata
= 0;
180 /* MIPS32 DSP ASE(r2) specific registers */
181 vex_state
->guest_DSPControl
= 0; /* DSPControl register */
182 vex_state
->guest_ac0
= 0; /* Accumulator 0 */
183 vex_state
->guest_ac1
= 0; /* Accumulator 1 */
184 vex_state
->guest_ac2
= 0; /* Accumulator 2 */
185 vex_state
->guest_ac3
= 0; /* Accumulator 3 */
187 vex_state
->guest_w0
.w64
[0] = 0;
188 vex_state
->guest_w0
.w64
[1] = 0;
189 vex_state
->guest_w1
.w64
[0] = 0;
190 vex_state
->guest_w1
.w64
[1] = 0;
191 vex_state
->guest_w2
.w64
[0] = 0;
192 vex_state
->guest_w2
.w64
[1] = 0;
195 void LibVEX_GuestMIPS64_initialise ( /*OUT*/ VexGuestMIPS64State
* vex_state
)
197 vex_state
->guest_r0
= 0; /* Hardwired to 0 */
198 vex_state
->guest_r1
= 0; /* Assembler temporary */
199 vex_state
->guest_r2
= 0; /* Values for function returns ... */
200 vex_state
->guest_r3
= 0;
201 vex_state
->guest_r4
= 0; /* Function arguments */
202 vex_state
->guest_r5
= 0;
203 vex_state
->guest_r6
= 0;
204 vex_state
->guest_r7
= 0;
205 vex_state
->guest_r8
= 0;
206 vex_state
->guest_r9
= 0;
207 vex_state
->guest_r10
= 0;
208 vex_state
->guest_r11
= 0;
209 vex_state
->guest_r12
= 0; /* Temporaries */
210 vex_state
->guest_r13
= 0;
211 vex_state
->guest_r14
= 0;
212 vex_state
->guest_r15
= 0;
213 vex_state
->guest_r16
= 0; /* Saved temporaries */
214 vex_state
->guest_r17
= 0;
215 vex_state
->guest_r18
= 0;
216 vex_state
->guest_r19
= 0;
217 vex_state
->guest_r20
= 0;
218 vex_state
->guest_r21
= 0;
219 vex_state
->guest_r22
= 0;
220 vex_state
->guest_r23
= 0;
221 vex_state
->guest_r24
= 0; /* Temporaries */
222 vex_state
->guest_r25
= 0;
223 vex_state
->guest_r26
= 0; /* Reserved for OS kernel */
224 vex_state
->guest_r27
= 0;
225 vex_state
->guest_r28
= 0; /* Global pointer */
226 vex_state
->guest_r29
= 0; /* Stack pointer */
227 vex_state
->guest_r30
= 0; /* Frame pointer */
228 vex_state
->guest_r31
= 0; /* Return address */
229 vex_state
->guest_PC
= 0; /* Program counter */
230 vex_state
->guest_HI
= 0; /* Multiply and divide register higher result */
231 vex_state
->guest_LO
= 0; /* Multiply and divide register lower result */
234 vex_state
->guest_f0
= 0x7ff800007ff80000ULL
; /* Floting point registers */
235 vex_state
->guest_f1
= 0x7ff800007ff80000ULL
;
236 vex_state
->guest_f2
= 0x7ff800007ff80000ULL
;
237 vex_state
->guest_f3
= 0x7ff800007ff80000ULL
;
238 vex_state
->guest_f4
= 0x7ff800007ff80000ULL
;
239 vex_state
->guest_f5
= 0x7ff800007ff80000ULL
;
240 vex_state
->guest_f6
= 0x7ff800007ff80000ULL
;
241 vex_state
->guest_f7
= 0x7ff800007ff80000ULL
;
242 vex_state
->guest_f8
= 0x7ff800007ff80000ULL
;
243 vex_state
->guest_f9
= 0x7ff800007ff80000ULL
;
244 vex_state
->guest_f10
= 0x7ff800007ff80000ULL
;
245 vex_state
->guest_f11
= 0x7ff800007ff80000ULL
;
246 vex_state
->guest_f12
= 0x7ff800007ff80000ULL
;
247 vex_state
->guest_f13
= 0x7ff800007ff80000ULL
;
248 vex_state
->guest_f14
= 0x7ff800007ff80000ULL
;
249 vex_state
->guest_f15
= 0x7ff800007ff80000ULL
;
250 vex_state
->guest_f16
= 0x7ff800007ff80000ULL
;
251 vex_state
->guest_f17
= 0x7ff800007ff80000ULL
;
252 vex_state
->guest_f18
= 0x7ff800007ff80000ULL
;
253 vex_state
->guest_f19
= 0x7ff800007ff80000ULL
;
254 vex_state
->guest_f20
= 0x7ff800007ff80000ULL
;
255 vex_state
->guest_f21
= 0x7ff800007ff80000ULL
;
256 vex_state
->guest_f22
= 0x7ff800007ff80000ULL
;
257 vex_state
->guest_f23
= 0x7ff800007ff80000ULL
;
258 vex_state
->guest_f24
= 0x7ff800007ff80000ULL
;
259 vex_state
->guest_f25
= 0x7ff800007ff80000ULL
;
260 vex_state
->guest_f26
= 0x7ff800007ff80000ULL
;
261 vex_state
->guest_f27
= 0x7ff800007ff80000ULL
;
262 vex_state
->guest_f28
= 0x7ff800007ff80000ULL
;
263 vex_state
->guest_f29
= 0x7ff800007ff80000ULL
;
264 vex_state
->guest_f30
= 0x7ff800007ff80000ULL
;
265 vex_state
->guest_f31
= 0x7ff800007ff80000ULL
;
267 vex_state
->guest_FIR
= 0; /* FP implementation and revision register */
268 vex_state
->guest_FCCR
= 0; /* FP condition codes register */
269 vex_state
->guest_FEXR
= 0; /* FP exceptions register */
270 vex_state
->guest_FENR
= 0; /* FP enables register */
271 vex_state
->guest_FCSR
= 0; /* FP control/status register */
273 vex_state
->guest_ULR
= 0;
275 /* Various pseudo-regs mandated by Vex or Valgrind. */
276 /* Emulation notes */
277 vex_state
->guest_EMNOTE
= 0;
279 /* For clflush: record start and length of area to invalidate */
280 vex_state
->guest_CMSTART
= 0;
281 vex_state
->guest_CMLEN
= 0;
282 vex_state
->host_EvC_COUNTER
= 0;
283 vex_state
->host_EvC_FAILADDR
= 0;
285 /* Used to record the unredirected guest address at the start of
286 a translation whose start has been redirected. By reading
287 this pseudo-register shortly afterwards, the translation can
288 find out what the corresponding no-redirection address was.
289 Note, this is only set for wrap-style redirects, not for
290 replace-style ones. */
291 vex_state
->guest_NRADDR
= 0;
293 vex_state
->guest_COND
= 0;
295 vex_state
->guest_CP0_status
= MIPS_CP0_STATUS_FR
;
297 vex_state
->guest_LLaddr
= 0xFFFFFFFFFFFFFFFFULL
;
298 vex_state
->guest_LLdata
= 0;
300 vex_state
->guest_MSACSR
= 0;
303 /*-----------------------------------------------------------*/
304 /*--- Describing the mips guest state, for the benefit ---*/
305 /*--- of iropt and instrumenters. ---*/
306 /*-----------------------------------------------------------*/
/* Figure out if any part of the guest state contained in minoff
   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).

   We enforce precise exns for guest SP, PC.

   Only SP is needed in mode VexRegUpdSpAtMemAccess. */
316 Bool
guest_mips32_state_requires_precise_mem_exns (
317 Int minoff
, Int maxoff
, VexRegisterUpdates pxControl
320 Int sp_min
= offsetof(VexGuestMIPS32State
, guest_r29
);
321 Int sp_max
= sp_min
+ 4 - 1;
322 Int pc_min
= offsetof(VexGuestMIPS32State
, guest_PC
);
323 Int pc_max
= pc_min
+ 4 - 1;
325 if (maxoff
< sp_min
|| minoff
> sp_max
) {
326 /* no overlap with sp */
327 if (pxControl
== VexRegUpdSpAtMemAccess
)
328 return False
; /* We only need to check stack pointer. */
333 if (maxoff
< pc_min
|| minoff
> pc_max
) {
334 /* no overlap with pc */
339 /* We appear to need precise updates of R11 in order to get proper
340 stacktraces from non-optimised code. */
341 Int fp_min
= offsetof(VexGuestMIPS32State
, guest_r30
);
342 Int fp_max
= fp_min
+ 4 - 1;
344 if (maxoff
< fp_min
|| minoff
> fp_max
) {
345 /* no overlap with fp */
353 Bool
guest_mips64_state_requires_precise_mem_exns (
354 Int minoff
, Int maxoff
, VexRegisterUpdates pxControl
357 Int sp_min
= offsetof(VexGuestMIPS64State
, guest_r29
);
358 Int sp_max
= sp_min
+ 8 - 1;
359 Int pc_min
= offsetof(VexGuestMIPS64State
, guest_PC
);
360 Int pc_max
= pc_min
+ 8 - 1;
362 if ( maxoff
< sp_min
|| minoff
> sp_max
) {
363 /* no overlap with sp */
364 if (pxControl
== VexRegUpdSpAtMemAccess
)
365 return False
; /* We only need to check stack pointer. */
370 if ( maxoff
< pc_min
|| minoff
> pc_max
) {
371 /* no overlap with pc */
376 Int fp_min
= offsetof(VexGuestMIPS64State
, guest_r30
);
377 Int fp_max
= fp_min
+ 8 - 1;
379 if ( maxoff
< fp_min
|| minoff
> fp_max
) {
380 /* no overlap with fp */
388 VexGuestLayout mips32Guest_layout
= {
389 /* Total size of the guest state, in bytes. */
390 .total_sizeB
= sizeof(VexGuestMIPS32State
),
391 /* Describe the stack pointer. */
392 .offset_SP
= offsetof(VexGuestMIPS32State
, guest_r29
),
394 /* Describe the frame pointer. */
395 .offset_FP
= offsetof(VexGuestMIPS32State
, guest_r30
),
397 /* Describe the instruction pointer. */
398 .offset_IP
= offsetof(VexGuestMIPS32State
, guest_PC
),
400 /* Describe any sections to be regarded by Memcheck as
405 /* 0 */ ALWAYSDEFD32(guest_r0
),
406 /* 1 */ ALWAYSDEFD32(guest_r1
),
407 /* 2 */ ALWAYSDEFD32(guest_EMNOTE
),
408 /* 3 */ ALWAYSDEFD32(guest_CMSTART
),
409 /* 4 */ ALWAYSDEFD32(guest_CMLEN
),
410 /* 5 */ ALWAYSDEFD32(guest_r29
),
411 /* 6 */ ALWAYSDEFD32(guest_r31
),
412 /* 7 */ ALWAYSDEFD32(guest_ULR
)
416 VexGuestLayout mips64Guest_layout
= {
417 /* Total size of the guest state, in bytes. */
418 .total_sizeB
= sizeof(VexGuestMIPS64State
),
419 /* Describe the stack pointer. */
420 .offset_SP
= offsetof(VexGuestMIPS64State
, guest_r29
),
422 /* Describe the frame pointer. */
423 .offset_FP
= offsetof(VexGuestMIPS64State
, guest_r30
),
425 /* Describe the instruction pointer. */
426 .offset_IP
= offsetof(VexGuestMIPS64State
, guest_PC
),
428 /* Describe any sections to be regarded by Memcheck as
433 /* 0 */ ALWAYSDEFD64 (guest_r0
),
434 /* 1 */ ALWAYSDEFD64 (guest_EMNOTE
),
435 /* 2 */ ALWAYSDEFD64 (guest_CMSTART
),
436 /* 3 */ ALWAYSDEFD64 (guest_CMLEN
),
437 /* 4 */ ALWAYSDEFD64 (guest_r29
),
438 /* 5 */ ALWAYSDEFD64 (guest_r31
),
439 /* 6 */ ALWAYSDEFD64 (guest_ULR
)
443 #define ASM_VOLATILE_RDHWR(opcode) \
444 __asm__ __volatile__(".word 0x7C02003B | "#opcode" << 11 \n\t" \
448 HWord
mips_dirtyhelper_rdhwr ( UInt rd
)
450 #if defined(__mips__)
451 register HWord x
__asm__("v0") = 0;
454 case 0: /* x = CPUNum() */
455 ASM_VOLATILE_RDHWR(0); /* rdhwr v0, $0 */
458 case 1: /* x = SYNCI_Step() */
459 ASM_VOLATILE_RDHWR(1); /* rdhwr v0, $1 */
462 case 2: /* x = CC() */
463 ASM_VOLATILE_RDHWR(2); /* rdhwr v0, $2 */
466 case 3: /* x = CCRes() */
467 ASM_VOLATILE_RDHWR(3); /* rdhwr v0, $3 */
470 case 31: /* x = CVMX_get_cycles() */
471 ASM_VOLATILE_RDHWR(31); /* rdhwr v0, $31 */
/* Run a single-precision unary FP op on the host with the guest FCSR
   installed, capturing the resulting FCSR in 'ret'.  $8 saves/restores
   the host FCSR.  NOTE(review): the ".set pop", output-operand and
   clobber lines were excised and have been reconstructed — verify. */
#define ASM_VOLATILE_UNARY32(inst)                                  \
   __asm__ volatile(".set push"         "\n\t"                      \
                    ".set hardfloat"    "\n\t"                      \
                    "cfc1 $8, $31"      "\n\t"                      \
                    "ctc1 %2, $31"      "\n\t"                      \
                    "mtc1 %1, $f20"     "\n\t"                      \
                    #inst" $f20, $f20"  "\n\t"                      \
                    "cfc1 %0, $31"      "\n\t"                      \
                    "ctc1 $8, $31"      "\n\t"                      \
                    ".set pop"          "\n\t"                      \
                    : "=r" (ret)                                    \
                    : "r" (loFsVal), "r" (fcsr)                     \
                    : "$8", "$f20"                                  \
                   );
/* As ASM_VOLATILE_UNARY32, but the operand is a double loaded from
   memory (&fsVal).  NOTE(review): ".set pop", output operand and the
   closing paren were excised and have been reconstructed — verify. */
#define ASM_VOLATILE_UNARY32_DOUBLE(inst)                           \
   __asm__ volatile(".set push"          "\n\t"                     \
                    ".set hardfloat"     "\n\t"                     \
                    "cfc1 $8, $31"       "\n\t"                     \
                    "ctc1 %2, $31"       "\n\t"                     \
                    "ldc1 $f20, 0(%1)"   "\n\t"                     \
                    #inst" $f20, $f20"   "\n\t"                     \
                    "cfc1 %0, $31"       "\n\t"                     \
                    "ctc1 $8, $31"       "\n\t"                     \
                    ".set pop"           "\n\t"                     \
                    : "=r" (ret)                                    \
                    : "r" (&fsVal), "r" (fcsr)                      \
                    : "$8", "$f20", "$f21"                          \
                   );
/* Unary FP op in the fp=64 register view, operand loaded from
   addr[fs]; resulting FCSR captured in 'ret'.  NOTE(review): the
   ".set pop", output-operand and clobber lines were excised and have
   been reconstructed — verify. */
#define ASM_VOLATILE_UNARY64(inst)                                  \
   __asm__ volatile(".set push"          "\n\t"                     \
                    ".set hardfloat"     "\n\t"                     \
                    ".set fp=64"         "\n\t"                     \
                    "cfc1 $8, $31"       "\n\t"                     \
                    "ctc1 %2, $31"       "\n\t"                     \
                    "ldc1 $f24, 0(%1)"   "\n\t"                     \
                    #inst" $f24, $f24"   "\n\t"                     \
                    "cfc1 %0, $31"       "\n\t"                     \
                    "ctc1 $8, $31"       "\n\t"                     \
                    ".set pop"           "\n\t"                     \
                    : "=r" (ret)                                    \
                    : "r" (&(addr[fs])), "r" (fcsr)                 \
                    : "$8", "$f24"                                  \
                   );
/* Unary MSA op with the guest MSACSR installed; resulting MSACSR is
   captured in 'ret'.  $t0 saves/restores the host MSACSR.
   NOTE(review): ".set msa", ".set pop", output-operand and clobber
   lines were excised and have been reconstructed — verify. */
#define ASM_VOLATILE_MSA_UNARY(inst)                                \
   __asm__ volatile(".set push"          "\n\t"                     \
                    ".set mips32r2"      "\n\t"                     \
                    ".set hardfloat"     "\n\t"                     \
                    ".set fp=64"         "\n\t"                     \
                    ".set msa"           "\n\t"                     \
                    ".set noreorder"     "\n\t"                     \
                    "cfcmsa $t0, $1"     "\n\t"                     \
                    "ctcmsa $1, %2"      "\n\t"                     \
                    "ld.b $w24, 0(%1)"   "\n\t"                     \
                    #inst" $w24, $w24"   "\n\t"                     \
                    "cfcmsa %0, $1"      "\n\t"                     \
                    "ctcmsa $1, $t0"     "\n\t"                     \
                    ".set pop"           "\n\t"                     \
                    : "=r" (ret)                                    \
                    : "r" (&(addr[ws])), "r" (msacsr)               \
                    : "t0"                                          \
                   );
/* Binary single-precision FP op with guest FCSR installed; resulting
   FCSR captured in 'ret'.  NOTE(review): ".set pop" and the output
   operand were excised and have been reconstructed — verify. */
#define ASM_VOLATILE_BINARY32(inst)                                 \
   __asm__ volatile(".set push"               "\n\t"                \
                    ".set hardfloat"          "\n\t"                \
                    "cfc1 $8, $31"            "\n\t"                \
                    "ctc1 %3, $31"            "\n\t"                \
                    "mtc1 %1, $f20"           "\n\t"                \
                    "mtc1 %2, $f22"           "\n\t"                \
                    #inst" $f20, $f20, $f22"  "\n\t"                \
                    "cfc1 %0, $31"            "\n\t"                \
                    "ctc1 $8, $31"            "\n\t"                \
                    ".set pop"                "\n\t"                \
                    : "=r" (ret)                                    \
                    : "r" (loFsVal), "r" (loFtVal), "r" (fcsr)      \
                    : "$8", "$f20", "$f22"                          \
                   );
/* Binary double-precision FP op, operands loaded from &fsVal/&ftVal;
   resulting FCSR captured in 'ret'.  NOTE(review): ".set pop", output
   operand and closing paren were excised and have been reconstructed
   — verify. */
#define ASM_VOLATILE_BINARY32_DOUBLE(inst)                          \
   __asm__ volatile(".set push"               "\n\t"                \
                    ".set hardfloat"          "\n\t"                \
                    "cfc1 $8, $31"            "\n\t"                \
                    "ctc1 %3, $31"            "\n\t"                \
                    "ldc1 $f20, 0(%1)"        "\n\t"                \
                    "ldc1 $f22, 0(%2)"        "\n\t"                \
                    #inst" $f20, $f20, $f22"  "\n\t"                \
                    "cfc1 %0, $31"            "\n\t"                \
                    "ctc1 $8, $31"            "\n\t"                \
                    ".set pop"                "\n\t"                \
                    : "=r" (ret)                                    \
                    : "r" (&fsVal), "r" (&ftVal), "r" (fcsr)        \
                    : "$8", "$f20", "$f21", "$f22", "$f23"          \
                   );
/* Binary FP op on addr[fs], addr[ft]; resulting FCSR captured in
   'ret'.  NOTE(review): ".set pop", output operand and closing paren
   were excised and have been reconstructed — verify. */
#define ASM_VOLATILE_BINARY64(inst)                                 \
   __asm__ volatile(".set push"               "\n\t"                \
                    ".set hardfloat"          "\n\t"                \
                    "cfc1 $8, $31"            "\n\t"                \
                    "ctc1 %3, $31"            "\n\t"                \
                    "ldc1 $f24, 0(%1)"        "\n\t"                \
                    "ldc1 $f26, 0(%2)"        "\n\t"                \
                    #inst" $f24, $f24, $f26"  "\n\t"                \
                    "cfc1 %0, $31"            "\n\t"                \
                    "ctc1 $8, $31"            "\n\t"                \
                    ".set pop"                "\n\t"                \
                    : "=r" (ret)                                    \
                    : "r" (&(addr[fs])), "r" (&(addr[ft])), "r" (fcsr) \
                    : "$8", "$f24", "$f26"                          \
                   );
/* Binary MSA op on addr[ws], addr[wt] with guest MSACSR installed;
   resulting MSACSR captured in 'ret'.  NOTE(review): ".set msa",
   ".set pop", output-operand and clobber lines were excised and have
   been reconstructed — verify. */
#define ASM_VOLATILE_MSA_BINARY(inst)                               \
   __asm__ volatile(".set push"               "\n\t"                \
                    ".set mips32r2"           "\n\t"                \
                    ".set hardfloat"          "\n\t"                \
                    ".set fp=64"              "\n\t"                \
                    ".set msa"                "\n\t"                \
                    "cfcmsa $t0, $1"          "\n\t"                \
                    "ctcmsa $1, %3"           "\n\t"                \
                    "ld.b $w24, 0(%1)"        "\n\t"                \
                    "ld.b $w26, 0(%2)"        "\n\t"                \
                    #inst" $w24, $w24, $w26"  "\n\t"                \
                    "cfcmsa %0, $1"           "\n\t"                \
                    "ctcmsa $1, $t0"          "\n\t"                \
                    ".set pop"                "\n\t"                \
                    : "=r" (ret)                                    \
                    : "r" (&(addr[ws])), "r" (&(addr[wt])), "r" (msacsr)\
                    : "t0"                                          \
                   );
616 /* TODO: Add cases for all fpu instructions because all fpu instructions are
617 change the value of FCSR register. */
618 extern UInt
mips_dirtyhelper_calculate_FCSR_fp32 ( void* gs
, UInt fs
, UInt ft
,
622 #if defined(__mips__)
623 VexGuestMIPS32State
* guest_state
= (VexGuestMIPS32State
*)gs
;
624 UInt loFsVal
, hiFsVal
, loFtVal
, hiFtVal
;
625 #if defined (_MIPSEL)
626 ULong
*addr
= (ULong
*)&guest_state
->guest_f0
;
627 loFsVal
= (UInt
)addr
[fs
];
628 hiFsVal
= (UInt
)addr
[fs
+1];
629 loFtVal
= (UInt
)addr
[ft
];
630 hiFtVal
= (UInt
)addr
[ft
+1];
631 #elif defined (_MIPSEB)
632 UInt
*addr
= (UInt
*)&guest_state
->guest_f0
;
633 loFsVal
= (UInt
)addr
[fs
*2];
634 hiFsVal
= (UInt
)addr
[fs
*2+2];
635 loFtVal
= (UInt
)addr
[ft
*2];
636 hiFtVal
= (UInt
)addr
[ft
*2+2];
638 ULong fsVal
= ((ULong
) hiFsVal
) << 32 | loFsVal
;
639 ULong ftVal
= ((ULong
) hiFtVal
) << 32 | loFtVal
;
640 UInt fcsr
= guest_state
->guest_FCSR
;
643 ASM_VOLATILE_UNARY32_DOUBLE(round
.w
.d
)
646 ASM_VOLATILE_UNARY32(floor
.w
.s
)
649 ASM_VOLATILE_UNARY32_DOUBLE(floor
.w
.d
)
652 ASM_VOLATILE_UNARY32(trunc
.w
.s
)
655 ASM_VOLATILE_UNARY32_DOUBLE(trunc
.w
.d
)
658 ASM_VOLATILE_UNARY32(ceil
.w
.s
)
661 ASM_VOLATILE_UNARY32_DOUBLE(ceil
.w
.d
)
664 ASM_VOLATILE_UNARY32(cvt
.d
.s
)
667 ASM_VOLATILE_UNARY32(cvt
.d
.w
)
670 ASM_VOLATILE_UNARY32(cvt
.s
.w
)
673 ASM_VOLATILE_UNARY32_DOUBLE(cvt
.s
.d
)
676 ASM_VOLATILE_UNARY32(cvt
.w
.s
)
679 ASM_VOLATILE_UNARY32_DOUBLE(cvt
.w
.d
)
682 ASM_VOLATILE_UNARY32(round
.w
.s
)
685 ASM_VOLATILE_BINARY32(add
.s
)
688 ASM_VOLATILE_BINARY32_DOUBLE(add
.d
)
691 ASM_VOLATILE_BINARY32(sub
.s
)
694 ASM_VOLATILE_BINARY32_DOUBLE(sub
.d
)
697 ASM_VOLATILE_BINARY32(div
.s
)
707 /* TODO: Add cases for all fpu instructions because all fpu instructions are
708 change the value of FCSR register. */
709 extern UInt
mips_dirtyhelper_calculate_FCSR_fp64 ( void* gs
, UInt fs
, UInt ft
,
713 #if defined(__mips__) && ((__mips == 64) || \
714 (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)))
715 #if defined(VGA_mips32)
716 VexGuestMIPS32State
* guest_state
= (VexGuestMIPS32State
*)gs
;
718 VexGuestMIPS64State
* guest_state
= (VexGuestMIPS64State
*)gs
;
720 ULong
*addr
= (ULong
*)&guest_state
->guest_f0
;
721 UInt fcsr
= guest_state
->guest_FCSR
;
724 ASM_VOLATILE_UNARY64(round
.w
.d
)
727 ASM_VOLATILE_UNARY64(floor
.w
.s
)
730 ASM_VOLATILE_UNARY64(floor
.w
.d
)
733 ASM_VOLATILE_UNARY64(trunc
.w
.s
)
736 ASM_VOLATILE_UNARY64(trunc
.w
.d
)
739 ASM_VOLATILE_UNARY64(ceil
.w
.s
)
742 ASM_VOLATILE_UNARY64(ceil
.w
.d
)
745 ASM_VOLATILE_UNARY64(cvt
.d
.s
)
748 ASM_VOLATILE_UNARY64(cvt
.d
.w
)
751 ASM_VOLATILE_UNARY64(cvt
.s
.w
)
754 ASM_VOLATILE_UNARY64(cvt
.s
.d
)
757 ASM_VOLATILE_UNARY64(cvt
.w
.s
)
760 ASM_VOLATILE_UNARY64(cvt
.w
.d
)
763 ASM_VOLATILE_UNARY64(round
.w
.s
)
766 ASM_VOLATILE_UNARY64(ceil
.l
.s
)
769 ASM_VOLATILE_UNARY64(ceil
.l
.d
)
772 ASM_VOLATILE_UNARY64(cvt
.d
.l
)
775 ASM_VOLATILE_UNARY64(cvt
.l
.s
)
778 ASM_VOLATILE_UNARY64(cvt
.l
.d
)
781 ASM_VOLATILE_UNARY64(cvt
.s
.l
)
784 ASM_VOLATILE_UNARY64(floor
.l
.s
)
787 ASM_VOLATILE_UNARY64(floor
.l
.d
)
790 ASM_VOLATILE_UNARY64(round
.l
.s
)
793 ASM_VOLATILE_UNARY64(round
.l
.d
)
796 ASM_VOLATILE_UNARY64(trunc
.l
.s
)
799 ASM_VOLATILE_UNARY64(trunc
.l
.d
)
802 ASM_VOLATILE_BINARY64(add
.s
)
805 ASM_VOLATILE_BINARY64(add
.d
)
808 ASM_VOLATILE_BINARY64(sub
.s
)
811 ASM_VOLATILE_BINARY64(sub
.d
)
814 ASM_VOLATILE_BINARY64(div
.s
)
816 #if defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
818 ASM_VOLATILE_UNARY64(rint
.s
)
821 ASM_VOLATILE_UNARY64(rint
.d
)
824 ASM_VOLATILE_BINARY64(max
.s
)
827 ASM_VOLATILE_BINARY64(max
.d
)
830 ASM_VOLATILE_BINARY64(min
.s
)
833 ASM_VOLATILE_BINARY64(min
.d
)
836 ASM_VOLATILE_BINARY64(maxa
.s
)
839 ASM_VOLATILE_BINARY64(maxa
.d
)
842 ASM_VOLATILE_BINARY64(mina
.s
)
845 ASM_VOLATILE_BINARY64(mina
.d
)
848 ASM_VOLATILE_BINARY64(cmp
.af
.s
)
851 ASM_VOLATILE_BINARY64(cmp
.af
.d
)
854 ASM_VOLATILE_BINARY64(cmp
.saf
.s
)
857 ASM_VOLATILE_BINARY64(cmp
.saf
.d
)
869 extern UInt
mips_dirtyhelper_calculate_MSACSR ( void* gs
, UInt ws
, UInt wt
,
872 /* GCC 4.8 and later support MIPS MSA. */
873 #if defined(__mips__) && (defined(__clang__) || (GCC_VERSION >= 408))
874 #if defined(VGA_mips32)
875 VexGuestMIPS32State
* guest_state
= (VexGuestMIPS32State
*)gs
;
877 VexGuestMIPS64State
* guest_state
= (VexGuestMIPS64State
*)gs
;
879 V128
*addr
= (V128
*)&guest_state
->guest_w0
;
880 UInt msacsr
= guest_state
->guest_MSACSR
;
884 ASM_VOLATILE_MSA_BINARY(fadd
.w
)
888 ASM_VOLATILE_MSA_BINARY(fadd
.d
)
892 ASM_VOLATILE_MSA_BINARY(fsub
.w
);
896 ASM_VOLATILE_MSA_BINARY(fsub
.d
);
900 ASM_VOLATILE_MSA_BINARY(fmul
.w
);
904 ASM_VOLATILE_MSA_BINARY(fmul
.d
);
908 ASM_VOLATILE_MSA_BINARY(fdiv
.w
);
912 ASM_VOLATILE_MSA_BINARY(fdiv
.d
);
916 ASM_VOLATILE_MSA_BINARY(fmadd
.w
);
920 ASM_VOLATILE_MSA_BINARY(fmadd
.d
);
924 ASM_VOLATILE_MSA_BINARY(fcaf
.w
);
928 ASM_VOLATILE_MSA_BINARY(fcaf
.d
);
932 ASM_VOLATILE_MSA_BINARY(fsaf
.w
);
936 ASM_VOLATILE_MSA_BINARY(fsaf
.d
);
940 ASM_VOLATILE_MSA_BINARY(fceq
.w
);
944 ASM_VOLATILE_MSA_BINARY(fceq
.d
);
948 ASM_VOLATILE_MSA_BINARY(fseq
.w
);
952 ASM_VOLATILE_MSA_BINARY(fseq
.d
);
956 ASM_VOLATILE_MSA_BINARY(fclt
.w
);
960 ASM_VOLATILE_MSA_BINARY(fclt
.d
);
964 ASM_VOLATILE_MSA_BINARY(fslt
.w
);
968 ASM_VOLATILE_MSA_BINARY(fslt
.d
);
972 ASM_VOLATILE_MSA_BINARY(fcle
.w
);
976 ASM_VOLATILE_MSA_BINARY(fcle
.d
);
980 ASM_VOLATILE_MSA_BINARY(fsle
.w
);
984 ASM_VOLATILE_MSA_BINARY(fsle
.d
);
988 ASM_VOLATILE_MSA_BINARY(fcne
.w
);
992 ASM_VOLATILE_MSA_BINARY(fcne
.d
);
996 ASM_VOLATILE_MSA_BINARY(fsne
.w
);
1000 ASM_VOLATILE_MSA_BINARY(fsne
.d
);
1004 ASM_VOLATILE_MSA_BINARY(fexp2
.w
);
1008 ASM_VOLATILE_MSA_BINARY(fexp2
.d
);
1012 ASM_VOLATILE_MSA_BINARY(fmin
.w
);
1016 ASM_VOLATILE_MSA_BINARY(fmin
.d
);
1020 ASM_VOLATILE_MSA_BINARY(fmin_a
.w
);
1024 ASM_VOLATILE_MSA_BINARY(fmin_a
.d
);
1028 ASM_VOLATILE_MSA_BINARY(fcun
.w
);
1032 ASM_VOLATILE_MSA_BINARY(fcun
.d
);
1036 ASM_VOLATILE_MSA_BINARY(fsun
.w
);
1040 ASM_VOLATILE_MSA_BINARY(fsun
.d
);
1044 ASM_VOLATILE_MSA_BINARY(fcor
.w
);
1048 ASM_VOLATILE_MSA_BINARY(fcor
.d
);
1052 ASM_VOLATILE_MSA_BINARY(fsor
.w
);
1056 ASM_VOLATILE_MSA_BINARY(fsor
.d
);
1060 ASM_VOLATILE_MSA_BINARY(fcueq
.w
);
1064 ASM_VOLATILE_MSA_BINARY(fcueq
.d
);
1068 ASM_VOLATILE_MSA_BINARY(fsueq
.w
);
1072 ASM_VOLATILE_MSA_BINARY(fsueq
.d
);
1076 ASM_VOLATILE_MSA_BINARY(fcune
.w
);
1080 ASM_VOLATILE_MSA_BINARY(fcune
.d
);
1084 ASM_VOLATILE_MSA_BINARY(fsune
.w
);
1088 ASM_VOLATILE_MSA_BINARY(fsune
.d
);
1092 ASM_VOLATILE_MSA_BINARY(fcule
.w
);
1096 ASM_VOLATILE_MSA_BINARY(fcule
.d
);
1100 ASM_VOLATILE_MSA_BINARY(fsule
.w
);
1104 ASM_VOLATILE_MSA_BINARY(fsule
.d
);
1108 ASM_VOLATILE_MSA_BINARY(fcult
.w
);
1112 ASM_VOLATILE_MSA_BINARY(fcult
.d
);
1116 ASM_VOLATILE_MSA_BINARY(fsult
.w
);
1120 ASM_VOLATILE_MSA_BINARY(fsult
.d
);
1123 ASM_VOLATILE_MSA_BINARY(fmax
.w
);
1127 ASM_VOLATILE_MSA_BINARY(fmax
.d
);
1131 ASM_VOLATILE_MSA_BINARY(fmax_a
.w
);
1135 ASM_VOLATILE_MSA_BINARY(fmax_a
.d
);
1139 ASM_VOLATILE_MSA_UNARY(ffint_s
.w
);
1143 ASM_VOLATILE_MSA_UNARY(ffint_s
.d
);
1147 ASM_VOLATILE_MSA_UNARY(frcp
.w
);
1151 ASM_VOLATILE_MSA_UNARY(frcp
.d
);
1155 ASM_VOLATILE_MSA_UNARY(frsqrt
.w
);
1159 ASM_VOLATILE_MSA_UNARY(frsqrt
.d
);
1163 ASM_VOLATILE_MSA_UNARY(fsqrt
.w
);
1167 ASM_VOLATILE_MSA_UNARY(fsqrt
.d
);
1171 ASM_VOLATILE_MSA_UNARY(frint
.w
);
1175 ASM_VOLATILE_MSA_UNARY(frint
.d
);
1178 ASM_VOLATILE_MSA_UNARY(ftrunc_u
.w
);
1182 ASM_VOLATILE_MSA_UNARY(ftrunc_u
.d
);
1186 ASM_VOLATILE_MSA_UNARY(ftrunc_s
.w
);
1190 ASM_VOLATILE_MSA_UNARY(ftrunc_s
.d
);
1194 ASM_VOLATILE_MSA_BINARY(fexdo
.h
);
1198 ASM_VOLATILE_MSA_BINARY(fexdo
.w
);
1202 ASM_VOLATILE_MSA_UNARY(fexupr
.w
);
1206 ASM_VOLATILE_MSA_UNARY(fexupr
.d
);
1210 ASM_VOLATILE_MSA_UNARY(fexupl
.w
);
1214 ASM_VOLATILE_MSA_UNARY(fexupl
.d
);
1218 ASM_VOLATILE_MSA_BINARY(ftq
.h
);
1222 ASM_VOLATILE_MSA_BINARY(ftq
.w
);
1226 ASM_VOLATILE_MSA_UNARY(ffqr
.d
);
1230 ASM_VOLATILE_MSA_UNARY(ffqr
.w
);
1234 ASM_VOLATILE_MSA_UNARY(ffql
.d
);
1238 ASM_VOLATILE_MSA_UNARY(ffql
.w
);
1242 ASM_VOLATILE_MSA_UNARY(ftint_s
.d
);
1246 ASM_VOLATILE_MSA_UNARY(ftint_s
.w
);
1250 ASM_VOLATILE_MSA_UNARY(ftint_u
.d
);
1254 ASM_VOLATILE_MSA_UNARY(ftint_u
.w
);
1258 ASM_VOLATILE_MSA_UNARY(flog2
.d
);
1262 ASM_VOLATILE_MSA_UNARY(flog2
.w
);
1266 ASM_VOLATILE_MSA_UNARY(ffint_u
.d
);
1270 ASM_VOLATILE_MSA_UNARY(ffint_u
.w
);
1278 extern UInt
mips_dirtyhelper_get_MSAIR() {
1280 /* GCC 4.8 and later support MIPS MSA. */
1281 #if defined(__mips__) && (defined(__clang__) || (GCC_VERSION >= 408))
1282 __asm__
volatile(".set push \n\t"
1283 ".set mips32r2 \n\t"
1284 ".set hardfloat \n\t"
1287 ".set noreorder \n\t"
1288 "cfcmsa %0, $0 \n\t"
1298 /*---------------------------------------------------------------*/
1299 /*--- end guest_mips_helpers.c ---*/
1300 /*---------------------------------------------------------------*/