2 /*--------------------------------------------------------------------*/
3 /*--- Interface to LibVEX_Translate, and the SP-update pass ---*/
4 /*--- m_translate.c ---*/
5 /*--------------------------------------------------------------------*/
8 This file is part of Valgrind, a dynamic binary instrumentation
11 Copyright (C) 2000-2017 Julian Seward
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, see <http://www.gnu.org/licenses/>.
27 The GNU General Public License is contained in the file COPYING.
30 #include "pub_core_basics.h"
31 #include "pub_core_vki.h"
32 #include "pub_core_aspacemgr.h"
34 #include "pub_core_machine.h" // VG_(fnptr_to_fnentry)
36 // VG_(machine_get_VexArchInfo)
37 #include "pub_core_libcbase.h"
38 #include "pub_core_libcassert.h"
39 #include "pub_core_libcprint.h"
40 #include "pub_core_options.h"
42 #include "pub_core_debuginfo.h" // VG_(get_fnname_w_offset)
43 #include "pub_core_redir.h" // VG_(redir_do_lookup)
45 #include "pub_core_signals.h" // VG_(synth_fault_{perms,mapping})
46 #include "pub_core_stacks.h" // VG_(unknown_SP_update*)()
47 #include "pub_core_tooliface.h" // VG_(tdict)
49 #include "pub_core_translate.h"
50 #include "pub_core_transtab.h"
51 #include "pub_core_dispatch.h" // VG_(run_innerloop__dispatch_{un}profiled)
52 // VG_(run_a_noredir_translation__return_point)
54 #include "pub_core_threadstate.h" // VexGuestArchState
55 #include "pub_core_trampoline.h" // VG_(ppctoc_magic_redirect_return_stub)
57 #include "pub_core_execontext.h" // VG_(make_depth_1_ExeContext_from_Addr)
59 #include "pub_core_gdbserver.h" // VG_(instrument_for_gdbserver_if_needed)
61 #include "libvex_emnote.h" // For PPC, EmWarn_PPC64_redir_underflow
63 /*------------------------------------------------------------*/
65 /*------------------------------------------------------------*/
67 static ULong n_TRACE_total_constructed
= 0;
68 static ULong n_TRACE_total_guest_insns
= 0;
69 static ULong n_TRACE_total_uncond_branches_followed
= 0;
70 static ULong n_TRACE_total_cond_branches_followed
= 0;
72 static ULong n_SP_updates_new_fast
= 0;
73 static ULong n_SP_updates_new_generic_known
= 0;
74 static ULong n_SP_updates_die_fast
= 0;
75 static ULong n_SP_updates_die_generic_known
= 0;
76 static ULong n_SP_updates_generic_unknown
= 0;
78 static ULong n_PX_VexRegUpdSpAtMemAccess
= 0;
79 static ULong n_PX_VexRegUpdUnwindregsAtMemAccess
= 0;
80 static ULong n_PX_VexRegUpdAllregsAtMemAccess
= 0;
81 static ULong n_PX_VexRegUpdAllregsAtEachInsn
= 0;
83 void VG_(print_translation_stats
) ( void )
87 "translate: %'llu guest insns, %'llu traces, "
88 "%'llu uncond chased, %llu cond chased\n",
89 n_TRACE_total_guest_insns
, n_TRACE_total_constructed
,
90 n_TRACE_total_uncond_branches_followed
,
91 n_TRACE_total_cond_branches_followed
);
92 UInt n_SP_updates
= n_SP_updates_new_fast
+ n_SP_updates_new_generic_known
93 + n_SP_updates_die_fast
+ n_SP_updates_die_generic_known
94 + n_SP_updates_generic_unknown
;
95 if (n_SP_updates
== 0) {
96 VG_(message
)(Vg_DebugMsg
, "translate: no SP updates identified\n");
98 VG_(message
)(Vg_DebugMsg
,
99 "translate: fast new/die SP updates identified: "
100 "%'llu (%3.1f%%)/%'llu (%3.1f%%)\n",
101 n_SP_updates_new_fast
, n_SP_updates_new_fast
* 100.0 / n_SP_updates
,
102 n_SP_updates_die_fast
, n_SP_updates_die_fast
* 100.0 / n_SP_updates
);
104 VG_(message
)(Vg_DebugMsg
,
105 "translate: generic_known new/die SP updates identified: "
106 "%'llu (%3.1f%%)/%'llu (%3.1f%%)\n",
107 n_SP_updates_new_generic_known
,
108 n_SP_updates_new_generic_known
* 100.0 / n_SP_updates
,
109 n_SP_updates_die_generic_known
,
110 n_SP_updates_die_generic_known
* 100.0 / n_SP_updates
);
112 VG_(message
)(Vg_DebugMsg
,
113 "translate: generic_unknown SP updates identified: %'llu (%3.1f%%)\n",
114 n_SP_updates_generic_unknown
,
115 n_SP_updates_generic_unknown
* 100.0 / n_SP_updates
);
120 "translate: PX: SPonly %'llu, UnwRegs %'llu,"
121 " AllRegs %'llu, AllRegsAllInsns %'llu\n",
122 n_PX_VexRegUpdSpAtMemAccess
, n_PX_VexRegUpdUnwindregsAtMemAccess
,
123 n_PX_VexRegUpdAllregsAtMemAccess
, n_PX_VexRegUpdAllregsAtEachInsn
);
126 /*------------------------------------------------------------*/
127 /*--- %SP-update pass ---*/
128 /*------------------------------------------------------------*/
130 static Bool
need_to_handle_SP_assignment(void)
132 return VG_(tdict
).any_new_mem_stack
|| VG_(tdict
).any_die_mem_stack
;
135 // - The SP aliases are held in an array which is used as a circular buffer.
136 // This misses very few constant updates of SP (ie. < 0.1%) while using a
137 // small, constant structure that will also never fill up and cause
138 // execution to abort.
139 // - Unused slots have a .temp value of 'IRTemp_INVALID'.
140 // - 'next_SP_alias_slot' is the index where the next alias will be stored.
141 // - If the buffer fills, we circle around and start over-writing
142 // non-IRTemp_INVALID values. This is rare, and the overwriting of a
143 //   value that would have subsequently been used is even rarer.
144 // - Every slot below next_SP_alias_slot holds a non-IRTemp_INVALID value.
145 // The rest either all won't (if we haven't yet circled around) or all
146 // will (if we have circled around).
155 // With 32 slots the buffer fills very rarely -- eg. once in a run of GCC.
156 // And I've tested with smaller values and the wrap-around case works ok.
158 static SP_Alias SP_aliases
[N_ALIASES
];
159 static Int next_SP_alias_slot
= 0;
161 static void clear_SP_aliases(void)
164 for (i
= 0; i
< N_ALIASES
; i
++) {
165 SP_aliases
[i
].temp
= IRTemp_INVALID
;
166 SP_aliases
[i
].delta
= 0;
168 next_SP_alias_slot
= 0;
171 static void add_SP_alias(IRTemp temp
, Long delta
)
173 vg_assert(temp
!= IRTemp_INVALID
);
174 SP_aliases
[ next_SP_alias_slot
].temp
= temp
;
175 SP_aliases
[ next_SP_alias_slot
].delta
= delta
;
176 next_SP_alias_slot
++;
177 if (N_ALIASES
== next_SP_alias_slot
) next_SP_alias_slot
= 0;
180 static Bool
get_SP_delta(IRTemp temp
, Long
* delta
)
182 Int i
; // i must be signed!
183 vg_assert(IRTemp_INVALID
!= temp
);
184 // Search backwards between current buffer position and the start.
185 for (i
= next_SP_alias_slot
-1; i
>= 0; i
--) {
186 if (temp
== SP_aliases
[i
].temp
) {
187 *delta
= SP_aliases
[i
].delta
;
191 // Search backwards between the end and the current buffer position.
192 for (i
= N_ALIASES
-1; i
>= next_SP_alias_slot
; i
--) {
193 if (temp
== SP_aliases
[i
].temp
) {
194 *delta
= SP_aliases
[i
].delta
;
201 static void update_SP_aliases(Long delta
)
204 for (i
= 0; i
< N_ALIASES
; i
++) {
205 if (SP_aliases
[i
].temp
== IRTemp_INVALID
) {
208 SP_aliases
[i
].delta
+= delta
;
212 /* Given a guest IP, get an origin tag for a 1-element stack trace,
213 and wrap it up in an IR atom that can be passed as the origin-tag
214 value for a stack-adjustment helper function. */
215 static IRExpr
* mk_ecu_Expr ( Addr guest_IP
)
219 = VG_(make_depth_1_ExeContext_from_Addr
)( guest_IP
);
221 ecu
= VG_(get_ECU_from_ExeContext
)( ec
);
222 vg_assert(VG_(is_plausible_ECU
)(ecu
));
223 /* This is always safe to do, since ecu is only 32 bits, and
224 HWord is 32 or 64. */
225 return mkIRExpr_HWord( (HWord
)ecu
);
228 /* When gdbserver is activated, the translation of a block must
229 first be done by the tool function, then followed by a pass
230 which (if needed) instruments the code for gdbserver.
233 IRSB
* tool_instrument_then_gdbserver_if_needed ( VgCallbackClosure
* closureV
,
235 const VexGuestLayout
* layout
,
236 const VexGuestExtents
* vge
,
237 const VexArchInfo
* vai
,
241 return VG_(instrument_for_gdbserver_if_needed
)
242 (VG_(tdict
).tool_instrument (closureV
,
255 /* For tools that want to know about SP changes, this pass adds
256 in the appropriate hooks. We have to do it after the tool's
257 instrumentation, so the tool doesn't have to worry about the C calls
258 it adds in, and we must do it before register allocation because
259 spilled temps make it much harder to work out the SP deltas.
260 This it is done with Vex's "second instrumentation" pass.
262 Basically, we look for GET(SP)/PUT(SP) pairs and track constant
263 increments/decrements of SP between them. (This requires tracking one or
264 more "aliases", which are not exact aliases but instead are tempregs
265 whose value is equal to the SP's plus or minus a known constant.)
266 If all the changes to SP leading up to a PUT(SP) are by known, small
267 constants, we can do a specific call to eg. new_mem_stack_4, otherwise
268 we fall back to the case that handles an unknown SP change.
270 There is some extra complexity to deal correctly with updates to
271 only parts of SP. Bizarre, but it has been known to happen.
274 IRSB
* vg_SP_update_pass ( void* closureV
,
276 const VexGuestLayout
* layout
,
277 const VexGuestExtents
* vge
,
278 const VexArchInfo
* vai
,
282 Int i
, j
, k
, minoff_ST
, maxoff_ST
, sizeof_SP
, offset_SP
;
283 Int first_SP
, last_SP
, first_Put
, last_Put
;
291 /* Set up stuff for tracking the guest IP */
292 Bool curr_IP_known
= False
;
296 IRSB
* bb
= emptyIRSB();
297 bb
->tyenv
= deepCopyIRTypeEnv(sb_in
->tyenv
);
298 bb
->next
= deepCopyIRExpr(sb_in
->next
);
299 bb
->jumpkind
= sb_in
->jumpkind
;
300 bb
->offsIP
= sb_in
->offsIP
;
304 sizeof_SP
= layout
->sizeof_SP
;
305 offset_SP
= layout
->offset_SP
;
306 typeof_SP
= sizeof_SP
==4 ? Ity_I32
: Ity_I64
;
307 vg_assert(sizeof_SP
== 4 || sizeof_SP
== 8);
309 /* --- Start of #defines --- */
311 # define IS_ADD(op) (sizeof_SP==4 ? ((op)==Iop_Add32) : ((op)==Iop_Add64))
312 # define IS_SUB(op) (sizeof_SP==4 ? ((op)==Iop_Sub32) : ((op)==Iop_Sub64))
314 # define IS_ADD_OR_SUB(op) (IS_ADD(op) || IS_SUB(op))
316 # define GET_CONST(con) \
317 (sizeof_SP==4 ? (Long)(Int)(con->Ico.U32) \
318 : (Long)(con->Ico.U64))
320 # define DO_NEW(syze, tmpp) \
322 Bool vanilla, w_ecu; \
323 vg_assert(curr_IP_known); \
324 vanilla = NULL != VG_(tdict).track_new_mem_stack_##syze; \
325 w_ecu = NULL != VG_(tdict).track_new_mem_stack_##syze##_w_ECU; \
326 vg_assert(!(vanilla && w_ecu)); /* can't have both */ \
327 if (VG_(tdict).any_new_mem_stack \
328 && !vanilla && !w_ecu) { \
329 n_SP_updates_new_generic_known++; \
333 if (VG_(tdict).any_new_mem_stack) { \
334 /* I don't know if it's really necessary to say that the */ \
335 /* call reads the stack pointer. But anyway, we do. */ \
337 dcall = unsafeIRDirty_0_N( \
339 "track_new_mem_stack_" #syze "_w_ECU", \
340 VG_(fnptr_to_fnentry)( \
341 VG_(tdict).track_new_mem_stack_##syze##_w_ECU ), \
342 mkIRExprVec_2(IRExpr_RdTmp(tmpp), \
343 mk_ecu_Expr(curr_IP)) \
346 dcall = unsafeIRDirty_0_N( \
348 "track_new_mem_stack_" #syze , \
349 VG_(fnptr_to_fnentry)( \
350 VG_(tdict).track_new_mem_stack_##syze ), \
351 mkIRExprVec_1(IRExpr_RdTmp(tmpp)) \
354 dcall->nFxState = 1; \
355 dcall->fxState[0].fx = Ifx_Read; \
356 dcall->fxState[0].offset = layout->offset_SP; \
357 dcall->fxState[0].size = layout->sizeof_SP; \
358 dcall->fxState[0].nRepeats = 0; \
359 dcall->fxState[0].repeatLen = 0; \
361 addStmtToIRSB( bb, IRStmt_Dirty(dcall) ); \
364 vg_assert(syze > 0); \
365 update_SP_aliases(syze); \
367 n_SP_updates_new_fast++; \
371 # define DO_DIE(syze, tmpp) \
373 if (VG_(tdict).any_die_mem_stack \
374 && !VG_(tdict).track_die_mem_stack_##syze) { \
375 n_SP_updates_die_generic_known++; \
379 if (VG_(tdict).any_die_mem_stack) { \
380 /* I don't know if it's really necessary to say that the */ \
381 /* call reads the stack pointer. But anyway, we do. */ \
382 dcall = unsafeIRDirty_0_N( \
384 "track_die_mem_stack_" #syze, \
385 VG_(fnptr_to_fnentry)( \
386 VG_(tdict).track_die_mem_stack_##syze ), \
387 mkIRExprVec_1(IRExpr_RdTmp(tmpp)) \
389 dcall->nFxState = 1; \
390 dcall->fxState[0].fx = Ifx_Read; \
391 dcall->fxState[0].offset = layout->offset_SP; \
392 dcall->fxState[0].size = layout->sizeof_SP; \
393 dcall->fxState[0].nRepeats = 0; \
394 dcall->fxState[0].repeatLen = 0; \
396 addStmtToIRSB( bb, IRStmt_Dirty(dcall) ); \
399 vg_assert(syze > 0); \
400 update_SP_aliases(-(syze)); \
402 n_SP_updates_die_fast++; \
406 /* --- End of #defines --- */
410 for (i
= 0; i
< sb_in
->stmts_used
; i
++) {
412 st
= sb_in
->stmts
[i
];
414 if (st
->tag
== Ist_IMark
) {
415 curr_IP_known
= True
;
416 curr_IP
= st
->Ist
.IMark
.addr
;
419 /* t = Get(sp): curr = t, delta = 0 */
420 if (st
->tag
!= Ist_WrTmp
) goto case2
;
421 e
= st
->Ist
.WrTmp
.data
;
422 if (e
->tag
!= Iex_Get
) goto case2
;
423 if (e
->Iex
.Get
.offset
!= offset_SP
) goto case2
;
424 if (e
->Iex
.Get
.ty
!= typeof_SP
) goto case2
;
425 vg_assert( typeOfIRTemp(bb
->tyenv
, st
->Ist
.WrTmp
.tmp
) == typeof_SP
);
426 add_SP_alias(st
->Ist
.WrTmp
.tmp
, 0);
427 addStmtToIRSB( bb
, st
);
431 /* t' = curr +/- const: curr = t', delta +=/-= const */
432 if (st
->tag
!= Ist_WrTmp
) goto case3
;
433 e
= st
->Ist
.WrTmp
.data
;
434 if (e
->tag
!= Iex_Binop
) goto case3
;
435 if (e
->Iex
.Binop
.arg1
->tag
!= Iex_RdTmp
) goto case3
;
436 if (!get_SP_delta(e
->Iex
.Binop
.arg1
->Iex
.RdTmp
.tmp
, &delta
)) goto case3
;
437 if (e
->Iex
.Binop
.arg2
->tag
!= Iex_Const
) goto case3
;
438 if (!IS_ADD_OR_SUB(e
->Iex
.Binop
.op
)) goto case3
;
439 con
= GET_CONST(e
->Iex
.Binop
.arg2
->Iex
.Const
.con
);
440 vg_assert( typeOfIRTemp(bb
->tyenv
, st
->Ist
.WrTmp
.tmp
) == typeof_SP
);
441 if (IS_ADD(e
->Iex
.Binop
.op
)) {
442 add_SP_alias(st
->Ist
.WrTmp
.tmp
, delta
+ con
);
444 add_SP_alias(st
->Ist
.WrTmp
.tmp
, delta
- con
);
446 addStmtToIRSB( bb
, st
);
450 /* t' = curr: curr = t' */
451 if (st
->tag
!= Ist_WrTmp
) goto case4
;
452 e
= st
->Ist
.WrTmp
.data
;
453 if (e
->tag
!= Iex_RdTmp
) goto case4
;
454 if (!get_SP_delta(e
->Iex
.RdTmp
.tmp
, &delta
)) goto case4
;
455 vg_assert( typeOfIRTemp(bb
->tyenv
, st
->Ist
.WrTmp
.tmp
) == typeof_SP
);
456 add_SP_alias(st
->Ist
.WrTmp
.tmp
, delta
);
457 addStmtToIRSB( bb
, st
);
462 /* More generally, we must correctly handle a Put which writes
463 any part of SP, not just the case where all of SP is
465 if (st
->tag
!= Ist_Put
) goto case5
;
466 first_SP
= offset_SP
;
467 last_SP
= first_SP
+ sizeof_SP
- 1;
468 first_Put
= st
->Ist
.Put
.offset
;
470 + sizeofIRType( typeOfIRExpr( bb
->tyenv
, st
->Ist
.Put
.data
))
472 vg_assert(first_SP
<= last_SP
);
473 vg_assert(first_Put
<= last_Put
);
475 if (last_Put
< first_SP
|| last_SP
< first_Put
)
476 goto case5
; /* no overlap */
478 if (st
->Ist
.Put
.data
->tag
== Iex_RdTmp
479 && get_SP_delta(st
->Ist
.Put
.data
->Iex
.RdTmp
.tmp
, &delta
)) {
480 IRTemp tttmp
= st
->Ist
.Put
.data
->Iex
.RdTmp
.tmp
;
481 /* Why should the following assertion hold? Because any
482 alias added by put_SP_alias must be of a temporary which
483 has the same type as typeof_SP, and whose value is a Get
484 at exactly offset_SP of size typeof_SP. Each call to
485 put_SP_alias is immediately preceded by an assertion that
486 we are putting in a binding for a correctly-typed
488 vg_assert( typeOfIRTemp(bb
->tyenv
, tttmp
) == typeof_SP
);
489 /* From the same type-and-offset-correctness argument, if
490 we found a useable alias, it must for an "exact" write of SP. */
491 vg_assert(first_SP
== first_Put
);
492 vg_assert(last_SP
== last_Put
);
494 case 0: addStmtToIRSB(bb
,st
); continue;
495 case 4: DO_DIE( 4, tttmp
); addStmtToIRSB(bb
,st
); continue;
496 case -4: DO_NEW( 4, tttmp
); addStmtToIRSB(bb
,st
); continue;
497 case 8: DO_DIE( 8, tttmp
); addStmtToIRSB(bb
,st
); continue;
498 case -8: DO_NEW( 8, tttmp
); addStmtToIRSB(bb
,st
); continue;
499 case 12: DO_DIE( 12, tttmp
); addStmtToIRSB(bb
,st
); continue;
500 case -12: DO_NEW( 12, tttmp
); addStmtToIRSB(bb
,st
); continue;
501 case 16: DO_DIE( 16, tttmp
); addStmtToIRSB(bb
,st
); continue;
502 case -16: DO_NEW( 16, tttmp
); addStmtToIRSB(bb
,st
); continue;
503 case 32: DO_DIE( 32, tttmp
); addStmtToIRSB(bb
,st
); continue;
504 case -32: DO_NEW( 32, tttmp
); addStmtToIRSB(bb
,st
); continue;
505 case 112: DO_DIE( 112, tttmp
); addStmtToIRSB(bb
,st
); continue;
506 case -112: DO_NEW( 112, tttmp
); addStmtToIRSB(bb
,st
); continue;
507 case 128: DO_DIE( 128, tttmp
); addStmtToIRSB(bb
,st
); continue;
508 case -128: DO_NEW( 128, tttmp
); addStmtToIRSB(bb
,st
); continue;
509 case 144: DO_DIE( 144, tttmp
); addStmtToIRSB(bb
,st
); continue;
510 case -144: DO_NEW( 144, tttmp
); addStmtToIRSB(bb
,st
); continue;
511 case 160: DO_DIE( 160, tttmp
); addStmtToIRSB(bb
,st
); continue;
512 case -160: DO_NEW( 160, tttmp
); addStmtToIRSB(bb
,st
); continue;
515 n_SP_updates_die_generic_known
++;
516 if (VG_(tdict
).any_die_mem_stack
)
519 n_SP_updates_new_generic_known
++;
520 if (VG_(tdict
).any_new_mem_stack
)
523 /* No tracking for delta. Just add the original statement. */
524 addStmtToIRSB(bb
,st
); continue;
527 /* Deal with an unknown update to SP. We're here because
529 (1) the Put does not exactly cover SP; it is a partial update.
530 Highly unlikely, but has been known to happen for 16-bit
531 Windows apps running on Wine, doing 16-bit adjustments to
533 (2) the Put does exactly cover SP, but we are unable to
534 determine how the value relates to the old SP. In any
535 case, we cannot assume that the Put.data value is a tmp;
536 we must assume it can be anything allowed in flat IR (tmp
540 n_SP_updates_generic_unknown
++;
542 // Nb: if all is well, this generic case will typically be
543 // called something like every 1000th SP update. If it's more than
544 // that, the above code may be missing some cases.
546 /* Pass both the old and new SP values to this helper. Also,
547 pass an origin tag, even if it isn't needed. */
548 old_SP
= newIRTemp(bb
->tyenv
, typeof_SP
);
551 IRStmt_WrTmp( old_SP
, IRExpr_Get(offset_SP
, typeof_SP
) )
554 /* Now we know what the old value of SP is. But knowing the new
555 value is a bit tricky if there is a partial write. */
556 if (first_Put
== first_SP
&& last_Put
== last_SP
) {
557 /* The common case, an exact write to SP. So st->Ist.Put.data
558 does hold the new value; simple. */
559 vg_assert(curr_IP_known
);
560 if (NULL
!= VG_(tdict
).track_new_mem_stack_w_ECU
)
561 dcall
= unsafeIRDirty_0_N(
563 "VG_(unknown_SP_update_w_ECU)",
564 VG_(fnptr_to_fnentry
)( &VG_(unknown_SP_update_w_ECU
) ),
565 mkIRExprVec_3( IRExpr_RdTmp(old_SP
), st
->Ist
.Put
.data
,
566 mk_ecu_Expr(curr_IP
) )
569 dcall
= unsafeIRDirty_0_N(
571 "VG_(unknown_SP_update)",
572 VG_(fnptr_to_fnentry
)( &VG_(unknown_SP_update
) ),
573 mkIRExprVec_2( IRExpr_RdTmp(old_SP
), st
->Ist
.Put
.data
)
576 addStmtToIRSB( bb
, IRStmt_Dirty(dcall
) );
577 /* don't forget the original assignment */
578 addStmtToIRSB( bb
, st
);
580 /* We have a partial update to SP. We need to know what
581 the new SP will be, and hand that to the helper call,
582 but when the helper call happens, SP must hold the
583 value it had before the update. Tricky.
584 Therefore use the following kludge:
585 1. do the partial SP update (Put)
586 2. Get the new SP value into a tmp, new_SP
593 addStmtToIRSB( bb
, st
);
595 new_SP
= newIRTemp(bb
->tyenv
, typeof_SP
);
598 IRStmt_WrTmp( new_SP
, IRExpr_Get(offset_SP
, typeof_SP
) )
601 addStmtToIRSB( bb
, IRStmt_Put(offset_SP
, IRExpr_RdTmp(old_SP
) ));
603 vg_assert(curr_IP_known
);
604 if (NULL
!= VG_(tdict
).track_new_mem_stack_w_ECU
)
605 dcall
= unsafeIRDirty_0_N(
607 "VG_(unknown_SP_update_w_ECU)",
608 VG_(fnptr_to_fnentry
)( &VG_(unknown_SP_update_w_ECU
) ),
609 mkIRExprVec_3( IRExpr_RdTmp(old_SP
),
610 IRExpr_RdTmp(new_SP
),
611 mk_ecu_Expr(curr_IP
) )
614 dcall
= unsafeIRDirty_0_N(
616 "VG_(unknown_SP_update)",
617 VG_(fnptr_to_fnentry
)( &VG_(unknown_SP_update
) ),
618 mkIRExprVec_2( IRExpr_RdTmp(old_SP
),
619 IRExpr_RdTmp(new_SP
) )
621 addStmtToIRSB( bb
, IRStmt_Dirty(dcall
) );
623 addStmtToIRSB( bb
, IRStmt_Put(offset_SP
, IRExpr_RdTmp(new_SP
) ));
626 /* Forget what we already know. */
629 /* If this is a Put of a tmp that exactly updates SP,
630 start tracking aliases against this tmp. */
632 if (first_Put
== first_SP
&& last_Put
== last_SP
633 && st
->Ist
.Put
.data
->tag
== Iex_RdTmp
) {
634 vg_assert( typeOfIRTemp(bb
->tyenv
, st
->Ist
.Put
.data
->Iex
.RdTmp
.tmp
)
636 add_SP_alias(st
->Ist
.Put
.data
->Iex
.RdTmp
.tmp
, 0);
642 /* PutI or Dirty call which overlaps SP: complain. We can't
643 deal with SP changing in weird ways (well, we can, but not at
644 this time of night). */
645 if (st
->tag
== Ist_PutI
) {
646 descr
= st
->Ist
.PutI
.details
->descr
;
647 minoff_ST
= descr
->base
;
648 maxoff_ST
= descr
->base
649 + descr
->nElems
* sizeofIRType(descr
->elemTy
) - 1;
650 if (!(offset_SP
> maxoff_ST
651 || (offset_SP
+ sizeof_SP
- 1) < minoff_ST
))
654 if (st
->tag
== Ist_Dirty
) {
655 d
= st
->Ist
.Dirty
.details
;
656 for (j
= 0; j
< d
->nFxState
; j
++) {
657 if (d
->fxState
[j
].fx
== Ifx_Read
|| d
->fxState
[j
].fx
== Ifx_None
)
659 /* Enumerate the described state segments */
660 for (k
= 0; k
< 1 + d
->fxState
[j
].nRepeats
; k
++) {
661 minoff_ST
= d
->fxState
[j
].offset
+ k
* d
->fxState
[j
].repeatLen
;
662 maxoff_ST
= minoff_ST
+ d
->fxState
[j
].size
- 1;
663 if (!(offset_SP
> maxoff_ST
664 || (offset_SP
+ sizeof_SP
- 1) < minoff_ST
))
670 /* well, not interesting. Just copy and keep going. */
671 addStmtToIRSB( bb
, st
);
673 } /* for (i = 0; i < sb_in->stmts_used; i++) */
678 VG_(core_panic
)("vg_SP_update_pass: PutI or Dirty which overlaps SP");
688 /*------------------------------------------------------------*/
689 /*--- Main entry point for the JITter. ---*/
690 /*------------------------------------------------------------*/
692 /* Extra comments re self-checking translations and self-modifying
693 code. (JRS 14 Oct 05).
696 (1) no checking: all code assumed to be not self-modifying
697 (2) partial: known-problematic situations get a self-check
698 (3) full checking: all translations get a self-check
700 As currently implemented, the default is (2). (3) is always safe,
701 but very slow. (1) works mostly, but fails for gcc nested-function
702 code which uses trampolines on the stack; this situation is
703 detected and handled by (2).
707 A more robust and transparent solution, which is not currently
708 implemented, is a variant of (2): if a translation is made from an
709 area which aspacem says does not have 'w' permission, then it can
710 be non-self-checking. Otherwise, it needs a self-check.
712 This is complicated by Vex's basic-block chasing. If a self-check
713 is requested, then Vex will not chase over basic block boundaries
714 (it's too complex). However there is still a problem if it chases
715 from a non-'w' area into a 'w' area.
717 I think the right thing to do is:
719 - if a translation request starts in a 'w' area, ask for a
720 self-checking translation, and do not allow any chasing (make
721 chase_into_ok return False). Note that the latter is redundant
722 in the sense that Vex won't chase anyway in this situation.
724 - if a translation request starts in a non-'w' area, do not ask for
725 a self-checking translation. However, do not allow chasing (as
726 determined by chase_into_ok) to go into a 'w' area.
728 The result of this is that all code inside 'w' areas is self
731 To complete the trick, there is a caveat: we must watch the
732 client's mprotect calls. If pages are changed from non-'w' to 'w'
733 then we should throw away all translations which intersect the
734 affected area, so as to force them to be redone with self-checks.
738 The above outlines the conditions under which bb chasing is allowed
739 from a self-modifying-code point of view. There are other
740 situations pertaining to function redirection in which it is
741 necessary to disallow chasing, but those fall outside the scope of
746 /* Vex dumps the final code in here. Then we can copy it off
748 /* 60000: should agree with assertion in VG_(add_to_transtab) in
750 #define N_TMPBUF 60000
751 static UChar tmpbuf
[N_TMPBUF
];
754 /* Function pointers we must supply to LibVEX in order that it
755 can bomb out and emit messages under Valgrind's control. */
756 __attribute__ ((noreturn
))
758 void failure_exit ( void )
760 LibVEX_ShowAllocStats();
761 VG_(core_panic
)("LibVEX called failure_exit().");
765 void log_bytes ( const HChar
* bytes
, SizeT nbytes
)
769 for (; i
< nbytes
-3; i
+= 4)
770 VG_(printf
)("%c%c%c%c", bytes
[i
], bytes
[i
+1], bytes
[i
+2], bytes
[i
+3]);
771 for (; i
< nbytes
; i
++)
772 VG_(printf
)("%c", bytes
[i
]);
776 /* --------- Various helper functions for translation --------- */
778 /* Look for reasons to disallow making translations from the given
781 static Bool
translations_allowable_from_seg ( NSegment
const* seg
, Addr addr
)
783 # if defined(VGA_x86) || defined(VGA_s390x) || defined(VGA_mips32) \
784 || defined(VGA_mips64) || defined(VGA_nanomips)
790 && (seg
->kind
== SkAnonC
|| seg
->kind
== SkFileC
|| seg
->kind
== SkShmC
)
792 || (seg
->hasR
&& (allowR
793 || VG_(has_gdbserver_breakpoint
) (addr
))));
794 /* If GDB/gdbsrv has inserted a breakpoint at addr, assume this is a valid
795 location to translate if seg is not executable but is readable.
796 This is needed for inferior function calls from GDB: GDB inserts a
797 breakpoint on the stack, and expects to regain control before the
798 breakpoint instruction at the breakpoint address is really
799 executed. For this, the breakpoint instruction must be translated
800 so as to have the call to gdbserver executed. */
804 /* Produce a bitmask stating which of the supplied extents needs a
805 self-check. See documentation of
806 VexTranslateArgs::needs_self_check for more details about the
807 return convention. */
809 static UInt
needs_self_check ( void* closureV
,
810 /*MAYBE_MOD*/VexRegisterUpdates
* pxControl
,
811 const VexGuestExtents
* vge
)
813 VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
816 vg_assert(vge
->n_used
>= 1 && vge
->n_used
<= 3);
819 /* Will we need to do a second pass in order to compute a
820 revised *pxControl value? */
821 Bool pxStatusMightChange
822 = /* "the user actually set it" */
823 VG_(clo_px_file_backed
) != VexRegUpd_INVALID
824 /* "and they set it to something other than the default. */
825 && *pxControl
!= VG_(clo_px_file_backed
);
827 /* First, compute |bitset|, which specifies which extent(s) need a
828 self check. Whilst we're at it, note any NSegments that we get,
829 so as to reduce the number of calls required to
830 VG_(am_find_nsegment) in a possible second pass. */
831 const NSegment
*segs
[3] = { NULL
, NULL
, NULL
};
833 for (i
= 0; i
< vge
->n_used
; i
++) {
835 Addr addr
= vge
->base
[i
];
836 SizeT len
= vge
->len
[i
];
837 NSegment
const* segA
= NULL
;
839 # if defined(VGO_darwin)
840 // GrP fixme hack - dyld i386 IMPORT gets rewritten.
841 // To really do this correctly, we'd need to flush the
842 // translation cache whenever a segment became +WX.
843 segA
= VG_(am_find_nsegment
)(addr
);
844 if (segA
&& segA
->hasX
&& segA
->hasW
)
849 switch (VG_(clo_smc_check
)) {
851 /* never check (except as per Darwin hack above) */
858 /* check if the address is in the same segment as this
859 thread's stack pointer */
860 Addr sp
= VG_(get_SP
)(closure
->tid
);
862 segA
= VG_(am_find_nsegment
)(addr
);
864 NSegment
const* segSP
= VG_(am_find_nsegment
)(sp
);
865 if (segA
&& segSP
&& segA
== segSP
)
869 case Vg_SmcAllNonFile
: {
870 /* check if any part of the extent is not in a
871 file-mapped segment */
873 segA
= VG_(am_find_nsegment
)(addr
);
875 if (segA
&& segA
->kind
== SkFileC
&& segA
->start
<= addr
876 && (len
== 0 || addr
+ len
<= segA
->end
+ 1)) {
877 /* in a file-mapped segment; skip the check */
891 if (pxStatusMightChange
&& segA
) {
892 vg_assert(i
< sizeof(segs
)/sizeof(segs
[0]));
897 /* Now, possibly do a second pass, to see if the PX status might
898 change. This can happen if the user specified value via
899 --px-file-backed= which is different from the default PX value
900 specified via --vex-iropt-register-updates (also known by the
901 shorter alias --px-default). */
902 if (pxStatusMightChange
) {
904 Bool allFileBacked
= True
;
905 for (i
= 0; i
< vge
->n_used
; i
++) {
906 Addr addr
= vge
->base
[i
];
907 SizeT len
= vge
->len
[i
];
908 NSegment
const* segA
= segs
[i
];
910 /* If we don't have a cached value for |segA|, compute it now. */
911 segA
= VG_(am_find_nsegment
)(addr
);
913 vg_assert(segA
); /* Can this ever fail? */
914 if (segA
&& segA
->kind
== SkFileC
&& segA
->start
<= addr
915 && (len
== 0 || addr
+ len
<= segA
->end
+ 1)) {
916 /* in a file-mapped segment */
918 /* not in a file-mapped segment, or we can't figure out
920 allFileBacked
= False
;
925 /* So, finally, if all the extents are in file backed segments, perform
926 the user-specified PX change. */
928 *pxControl
= VG_(clo_px_file_backed
);
933 /* Update running PX stats, as it is difficult without these to
934 check that the system is behaving as expected. */
935 switch (*pxControl
) {
936 case VexRegUpdSpAtMemAccess
:
937 n_PX_VexRegUpdSpAtMemAccess
++; break;
938 case VexRegUpdUnwindregsAtMemAccess
:
939 n_PX_VexRegUpdUnwindregsAtMemAccess
++; break;
940 case VexRegUpdAllregsAtMemAccess
:
941 n_PX_VexRegUpdAllregsAtMemAccess
++; break;
942 case VexRegUpdAllregsAtEachInsn
:
943 n_PX_VexRegUpdAllregsAtEachInsn
++; break;
952 /* This is a callback passed to LibVEX_Translate. It stops Vex from
953 chasing into function entry points that we wish to redirect.
954 Chasing across them obviously defeats the redirect mechanism, with
955 bad effects for Memcheck, Helgrind, DRD, Massif, and possibly others.
957 static Bool
chase_into_ok ( void* closureV
, Addr addr
)
959 NSegment
const* seg
= VG_(am_find_nsegment
)(addr
);
961 /* Work through a list of possibilities why we might not want to
964 /* Destination not in a plausible segment? */
965 if (!translations_allowable_from_seg(seg
, addr
))
968 /* Destination is redirected? */
969 if (addr
!= VG_(redir_do_lookup
)(addr
, NULL
))
972 # if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
973 /* This needs to be at the start of its own block. Don't chase. */
974 if (addr
== (Addr
)&VG_(ppctoc_magic_redirect_return_stub
))
978 /* overly conservative, but .. don't chase into the distinguished
979 address that m_transtab uses as an empty-slot marker for
981 if (addr
== TRANSTAB_BOGUS_GUEST_ADDR
)
984 # if defined(VGA_s390x)
985 /* Never chase into an EX instruction. Generating IR for EX causes
986 a round-trip through the scheduler including VG_(discard_translations).
987 And that's expensive as shown by perf/tinycc.c:
988 Chasing into EX increases the number of EX translations from 21 to
989 102666 causing a 7x runtime increase for "none" and a 3.2x runtime
990 increase for memcheck. */
991 if (((UChar
*)addr
)[0] == 0x44 || /* EX */
992 ((UChar
*)addr
)[0] == 0xC6) /* EXRL */
996 /* well, ok then. go on and chase. */
1003 if (0) VG_(printf
)("not chasing into 0x%lx\n", addr
);
1008 /* --------------- helpers for with-TOC platforms --------------- */
1010 /* NOTE: with-TOC platforms are: ppc64-linux. */
1012 static IRExpr
* mkU64 ( ULong n
) {
1013 return IRExpr_Const(IRConst_U64(n
));
1015 static IRExpr
* mkU32 ( UInt n
) {
1016 return IRExpr_Const(IRConst_U32(n
));
1019 #if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
1020 static IRExpr
* mkU8 ( UChar n
) {
1021 return IRExpr_Const(IRConst_U8(n
));
1023 static IRExpr
* narrowTo32 ( IRTypeEnv
* tyenv
, IRExpr
* e
) {
1024 if (typeOfIRExpr(tyenv
, e
) == Ity_I32
) {
1027 vg_assert(typeOfIRExpr(tyenv
, e
) == Ity_I64
);
1028 return IRExpr_Unop(Iop_64to32
, e
);
1032 /* Generate code to push word-typed expression 'e' onto this thread's
1033 redir stack, checking for stack overflow and generating code to
1036 static void gen_PUSH ( IRSB
* bb
, IRExpr
* e
)
1042 # if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
1043 Int stack_size
= VEX_GUEST_PPC64_REDIR_STACK_SIZE
;
1044 Int offB_REDIR_SP
= offsetof(VexGuestPPC64State
,guest_REDIR_SP
);
1045 Int offB_REDIR_STACK
= offsetof(VexGuestPPC64State
,guest_REDIR_STACK
);
1046 Int offB_EMNOTE
= offsetof(VexGuestPPC64State
,guest_EMNOTE
);
1047 Int offB_CIA
= offsetof(VexGuestPPC64State
,guest_CIA
);
1049 IRType ty_Word
= Ity_I64
;
1050 IROp op_CmpNE
= Iop_CmpNE64
;
1051 IROp op_Sar
= Iop_Sar64
;
1052 IROp op_Sub
= Iop_Sub64
;
1053 IROp op_Add
= Iop_Add64
;
1054 IRExpr
*(*mkU
)(ULong
) = mkU64
;
1055 vg_assert(VG_WORDSIZE
== 8);
1057 Int stack_size
= VEX_GUEST_PPC32_REDIR_STACK_SIZE
;
1058 Int offB_REDIR_SP
= offsetof(VexGuestPPC32State
,guest_REDIR_SP
);
1059 Int offB_REDIR_STACK
= offsetof(VexGuestPPC32State
,guest_REDIR_STACK
);
1060 Int offB_EMNOTE
= offsetof(VexGuestPPC32State
,guest_EMNOTE
);
1061 Int offB_CIA
= offsetof(VexGuestPPC32State
,guest_CIA
);
1063 IRType ty_Word
= Ity_I32
;
1064 IROp op_CmpNE
= Iop_CmpNE32
;
1065 IROp op_Sar
= Iop_Sar32
;
1066 IROp op_Sub
= Iop_Sub32
;
1067 IROp op_Add
= Iop_Add32
;
1068 IRExpr
*(*mkU
)(UInt
) = mkU32
;
1069 vg_assert(VG_WORDSIZE
== 4);
1072 vg_assert(sizeof(void*) == VG_WORDSIZE
);
1073 vg_assert(sizeof(Word
) == VG_WORDSIZE
);
1074 vg_assert(sizeof(Addr
) == VG_WORDSIZE
);
1076 descr
= mkIRRegArray( offB_REDIR_STACK
, ty_Word
, stack_size
);
1077 t1
= newIRTemp( bb
->tyenv
, ty_Word
);
1080 vg_assert(typeOfIRExpr(bb
->tyenv
, e
) == ty_Word
);
1082 /* t1 = guest_REDIR_SP + 1 */
1087 IRExpr_Binop(op_Add
, IRExpr_Get( offB_REDIR_SP
, ty_Word
), one
)
1091 /* Bomb out if t1 >=s stack_size, that is, (stack_size-1)-t1 <s 0.
1092 The destination (0) is a bit bogus but it doesn't matter since
1093 this is an unrecoverable error and will lead to Valgrind
1094 shutting down. _EMNOTE is set regardless - that's harmless
1095 since is only has a meaning if the exit is taken. */
1098 IRStmt_Put(offB_EMNOTE
, mkU32(EmWarn_PPC64_redir_overflow
))
1107 IRExpr_Binop(op_Sub
,mkU(stack_size
-1),IRExpr_RdTmp(t1
)),
1108 mkU8(8 * VG_WORDSIZE
- 1)
1113 is64
? IRConst_U64(0) : IRConst_U32(0),
1118 /* guest_REDIR_SP = t1 */
1119 addStmtToIRSB(bb
, IRStmt_Put(offB_REDIR_SP
, IRExpr_RdTmp(t1
)));
1121 /* guest_REDIR_STACK[t1+0] = e */
1122 /* PutI/GetI have I32-typed indexes regardless of guest word size */
1125 IRStmt_PutI(mkIRPutI(descr
,
1126 narrowTo32(bb
->tyenv
,IRExpr_RdTmp(t1
)), 0, e
)));
1130 /* Generate code to pop a word-sized value from this thread's redir
1131 stack, binding it to a new temporary, which is returned. As with
1132 gen_PUSH, an overflow check is also performed. */
1134 static IRTemp
gen_POP ( IRSB
* bb
)
1136 # if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
1137 Int stack_size
= VEX_GUEST_PPC64_REDIR_STACK_SIZE
;
1138 Int offB_REDIR_SP
= offsetof(VexGuestPPC64State
,guest_REDIR_SP
);
1139 Int offB_REDIR_STACK
= offsetof(VexGuestPPC64State
,guest_REDIR_STACK
);
1140 Int offB_EMNOTE
= offsetof(VexGuestPPC64State
,guest_EMNOTE
);
1141 Int offB_CIA
= offsetof(VexGuestPPC64State
,guest_CIA
);
1143 IRType ty_Word
= Ity_I64
;
1144 IROp op_CmpNE
= Iop_CmpNE64
;
1145 IROp op_Sar
= Iop_Sar64
;
1146 IROp op_Sub
= Iop_Sub64
;
1147 IRExpr
*(*mkU
)(ULong
) = mkU64
;
1149 Int stack_size
= VEX_GUEST_PPC32_REDIR_STACK_SIZE
;
1150 Int offB_REDIR_SP
= offsetof(VexGuestPPC32State
,guest_REDIR_SP
);
1151 Int offB_REDIR_STACK
= offsetof(VexGuestPPC32State
,guest_REDIR_STACK
);
1152 Int offB_EMNOTE
= offsetof(VexGuestPPC32State
,guest_EMNOTE
);
1153 Int offB_CIA
= offsetof(VexGuestPPC32State
,guest_CIA
);
1155 IRType ty_Word
= Ity_I32
;
1156 IROp op_CmpNE
= Iop_CmpNE32
;
1157 IROp op_Sar
= Iop_Sar32
;
1158 IROp op_Sub
= Iop_Sub32
;
1159 IRExpr
*(*mkU
)(UInt
) = mkU32
;
1162 IRRegArray
* descr
= mkIRRegArray( offB_REDIR_STACK
, ty_Word
, stack_size
);
1163 IRTemp t1
= newIRTemp( bb
->tyenv
, ty_Word
);
1164 IRTemp res
= newIRTemp( bb
->tyenv
, ty_Word
);
1165 IRExpr
* one
= mkU(1);
1167 vg_assert(sizeof(void*) == VG_WORDSIZE
);
1168 vg_assert(sizeof(Word
) == VG_WORDSIZE
);
1169 vg_assert(sizeof(Addr
) == VG_WORDSIZE
);
1171 /* t1 = guest_REDIR_SP */
1174 IRStmt_WrTmp( t1
, IRExpr_Get( offB_REDIR_SP
, ty_Word
) )
1177 /* Bomb out if t1 < 0. Same comments as gen_PUSH apply. */
1180 IRStmt_Put(offB_EMNOTE
, mkU32(EmWarn_PPC64_redir_underflow
))
1190 mkU8(8 * VG_WORDSIZE
- 1)
1195 is64
? IRConst_U64(0) : IRConst_U32(0),
1200 /* res = guest_REDIR_STACK[t1+0] */
1201 /* PutI/GetI have I32-typed indexes regardless of guest word size */
1206 IRExpr_GetI(descr
, narrowTo32(bb
->tyenv
,IRExpr_RdTmp(t1
)), 0)
1210 /* guest_REDIR_SP = t1-1 */
1213 IRStmt_Put(offB_REDIR_SP
, IRExpr_Binop(op_Sub
, IRExpr_RdTmp(t1
), one
))
1221 #if defined(VG_PLAT_USES_PPCTOC)
1223 /* Generate code to push LR and R2 onto this thread's redir stack,
1224 then set R2 to the new value (which is the TOC pointer to be used
1225 for the duration of the replacement function, as determined by
1226 m_debuginfo), and set LR to the magic return stub, so we get to
1227 intercept the return and restore R2 and L2 to the values saved
1230 static void gen_push_and_set_LR_R2 ( IRSB
* bb
, Addr new_R2_value
)
1232 # if defined(VGP_ppc64be_linux)
1233 Addr bogus_RA
= (Addr
)&VG_(ppctoc_magic_redirect_return_stub
);
1234 Int offB_GPR2
= offsetof(VexGuestPPC64State
,guest_GPR2
);
1235 Int offB_LR
= offsetof(VexGuestPPC64State
,guest_LR
);
1236 gen_PUSH( bb
, IRExpr_Get(offB_LR
, Ity_I64
) );
1237 gen_PUSH( bb
, IRExpr_Get(offB_GPR2
, Ity_I64
) );
1238 addStmtToIRSB( bb
, IRStmt_Put( offB_LR
, mkU64( bogus_RA
)) );
1239 addStmtToIRSB( bb
, IRStmt_Put( offB_GPR2
, mkU64( new_R2_value
)) );
1242 # error Platform is not TOC-afflicted, fortunately
1247 #if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
1249 static void gen_pop_R2_LR_then_bLR ( IRSB
* bb
)
1251 # if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
1252 Int offB_GPR2
= offsetof(VexGuestPPC64State
,guest_GPR2
);
1253 Int offB_LR
= offsetof(VexGuestPPC64State
,guest_LR
);
1254 Int offB_CIA
= offsetof(VexGuestPPC64State
,guest_CIA
);
1255 IRTemp old_R2
= newIRTemp( bb
->tyenv
, Ity_I64
);
1256 IRTemp old_LR
= newIRTemp( bb
->tyenv
, Ity_I64
);
1258 old_R2
= gen_POP( bb
);
1259 addStmtToIRSB( bb
, IRStmt_Put( offB_GPR2
, IRExpr_RdTmp(old_R2
)) );
1261 old_LR
= gen_POP( bb
);
1262 addStmtToIRSB( bb
, IRStmt_Put( offB_LR
, IRExpr_RdTmp(old_LR
)) );
1264 /* re boring, we arrived here precisely because a wrapped fn did a
1265 blr (hence Ijk_Ret); so we should just mark this jump as Boring,
1266 else one _Call will have resulted in two _Rets. */
1267 bb
->jumpkind
= Ijk_Boring
;
1268 bb
->next
= IRExpr_Binop(Iop_And64
, IRExpr_RdTmp(old_LR
), mkU64(~(3ULL)));
1269 bb
->offsIP
= offB_CIA
;
1271 # error Platform is not TOC-afflicted, fortunately
1276 #if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
1279 Bool
mk_preamble__ppctoc_magic_return_stub ( void* closureV
, IRSB
* bb
)
1281 VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
1282 /* Since we're creating the entire IRSB right here, give it a
1283 proper IMark, as it won't get one any other way, and cachegrind
1284 will barf if it doesn't have one (fair enough really). */
1285 addStmtToIRSB( bb
, IRStmt_IMark( closure
->readdr
, 4, 0 ) );
1286 /* Generate the magic sequence:
1287 pop R2 from hidden stack
1288 pop LR from hidden stack
1291 gen_pop_R2_LR_then_bLR(bb
);
1292 return True
; /* True == this is the entire BB; don't disassemble any
1293 real insns into it - just hand it directly to
1294 optimiser/instrumenter/backend. */
1298 #if defined(VGP_ppc64le_linux)
1299 /* Generate code to push LR and R2 onto this thread's redir stack.
1300 Need to save R2 in case we redirect to a global entry point. The
1301 value of R2 is not preserved when entering the global entry point.
1302 Need to make sure R2 gets restored on return. Set LR to the magic
1303 return stub, so we get to intercept the return and restore R2 and
1304 L2 to the values saved here.
1306 The existing infrastruture for the TOC enabled architectures is
1307 being exploited here. So, we need to enable a number of the
1308 code sections used by VG_PLAT_USES_PPCTOC.
1311 static void gen_push_R2_and_set_LR ( IRSB
* bb
)
1313 Addr bogus_RA
= (Addr
)&VG_(ppctoc_magic_redirect_return_stub
);
1314 Int offB_GPR2
= offsetof(VexGuestPPC64State
,guest_GPR2
);
1315 Int offB_LR
= offsetof(VexGuestPPC64State
,guest_LR
);
1316 gen_PUSH( bb
, IRExpr_Get(offB_LR
, Ity_I64
) );
1317 gen_PUSH( bb
, IRExpr_Get(offB_GPR2
, Ity_I64
) );
1318 addStmtToIRSB( bb
, IRStmt_Put( offB_LR
, mkU64( bogus_RA
)) );
1322 /* --------------- END helpers for with-TOC platforms --------------- */
1325 /* This is the IR preamble generator used for replacement
1326 functions. It adds code to set the guest_NRADDR{_GPR2} to zero
1327 (technically not necessary, but facilitates detecting mixups in
1328 which a replacement function has been erroneously declared using
1329 VG_REPLACE_FUNCTION_Z{U,Z} when instead it should have been written
1330 using VG_WRAP_FUNCTION_Z{U,Z}).
1332 On with-TOC platforms the follow hacks are also done: LR and R2 are
1333 pushed onto a hidden stack, R2 is set to the correct value for the
1334 replacement function, and LR is set to point at the magic
1335 return-stub address. Setting LR causes the return of the
1336 wrapped/redirected function to lead to our magic return stub, which
1337 restores LR and R2 from said stack and returns for real.
1339 VG_(get_StackTrace_wrk) understands that the LR value may point to
1340 the return stub address, and that in that case it can get the real
1341 LR value from the hidden stack instead. */
1343 Bool
mk_preamble__set_NRADDR_to_zero ( void* closureV
, IRSB
* bb
)
1346 = sizeof(((VexGuestArchState
*)0)->guest_NRADDR
);
1347 vg_assert(nraddr_szB
== 4 || nraddr_szB
== 8);
1348 vg_assert(nraddr_szB
== sizeof(RegWord
));
1352 offsetof(VexGuestArchState
,guest_NRADDR
),
1353 nraddr_szB
== 8 ? mkU64(0) : mkU32(0)
1356 // t9 needs to be set to point to the start of the redirected function.
1357 # if defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
1358 VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
1359 Int offB_GPR25
= offsetof(VexGuestMIPS32State
, guest_r25
);
1360 addStmtToIRSB(bb
, IRStmt_Put(offB_GPR25
, mkU32(closure
->readdr
)));
1362 # if defined(VGP_mips64_linux)
1363 VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
1364 Int offB_GPR25
= offsetof(VexGuestMIPS64State
, guest_r25
);
1365 addStmtToIRSB(bb
, IRStmt_Put(offB_GPR25
, mkU64(closure
->readdr
)));
1367 # if defined(VG_PLAT_USES_PPCTOC)
1368 { VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
1372 offsetof(VexGuestArchState
,guest_NRADDR_GPR2
),
1373 VG_WORDSIZE
==8 ? mkU64(0) : mkU32(0)
1376 gen_push_and_set_LR_R2 ( bb
, VG_(get_tocptr
)( VG_(current_DiEpoch
)(),
1377 closure
->readdr
) );
1381 #if defined(VGP_ppc64le_linux)
1382 VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
1383 Int offB_GPR12
= offsetof(VexGuestArchState
, guest_GPR12
);
1384 addStmtToIRSB(bb
, IRStmt_Put(offB_GPR12
, mkU64(closure
->readdr
)));
1387 offsetof(VexGuestArchState
,guest_NRADDR_GPR2
),
1388 VG_WORDSIZE
==8 ? mkU64(0) : mkU32(0)
1391 gen_push_R2_and_set_LR ( bb
);
1396 /* Ditto, except set guest_NRADDR to nraddr (the un-redirected guest
1397 address). This is needed for function wrapping - so the wrapper
1398 can read _NRADDR and find the address of the function being
1399 wrapped. On toc-afflicted platforms we must also snarf r2. */
1401 Bool
mk_preamble__set_NRADDR_to_nraddr ( void* closureV
, IRSB
* bb
)
1403 VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
1405 = sizeof(((VexGuestArchState
*)0)->guest_NRADDR
);
1406 vg_assert(nraddr_szB
== 4 || nraddr_szB
== 8);
1407 vg_assert(nraddr_szB
== sizeof(RegWord
));
1411 offsetof(VexGuestArchState
,guest_NRADDR
),
1413 ? IRExpr_Const(IRConst_U64( closure
->nraddr
))
1414 : IRExpr_Const(IRConst_U32( (UInt
)closure
->nraddr
))
1417 // t9 needs to be set to point to the start of the redirected function.
1418 # if defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
1419 Int offB_GPR25
= offsetof(VexGuestMIPS32State
, guest_r25
);
1420 addStmtToIRSB(bb
, IRStmt_Put(offB_GPR25
, mkU32(closure
->readdr
)));
1422 # if defined(VGP_mips64_linux)
1423 Int offB_GPR25
= offsetof(VexGuestMIPS64State
, guest_r25
);
1424 addStmtToIRSB(bb
, IRStmt_Put(offB_GPR25
, mkU64(closure
->readdr
)));
1426 # if defined(VG_PLAT_USES_PPCTOC)
1430 offsetof(VexGuestArchState
,guest_NRADDR_GPR2
),
1431 IRExpr_Get(offsetof(VexGuestArchState
,guest_GPR2
),
1432 VG_WORDSIZE
==8 ? Ity_I64
: Ity_I32
)
1435 gen_push_and_set_LR_R2 ( bb
, VG_(get_tocptr
)( VG_(current_DiEpoch
)(),
1436 closure
->readdr
) );
1438 #if defined(VGP_ppc64le_linux)
1439 /* This saves the r2 before leaving the function. We need to move
1440 * guest_NRADDR_GPR2 back to R2 on return.
1442 Int offB_GPR12
= offsetof(VexGuestArchState
, guest_GPR12
);
1446 offsetof(VexGuestArchState
,guest_NRADDR_GPR2
),
1447 IRExpr_Get(offsetof(VexGuestArchState
,guest_GPR2
),
1448 VG_WORDSIZE
==8 ? Ity_I64
: Ity_I32
)
1451 addStmtToIRSB(bb
, IRStmt_Put(offB_GPR12
, mkU64(closure
->readdr
)));
1452 gen_push_R2_and_set_LR ( bb
);
1457 /* --- Helpers to do with PPC related stack redzones. --- */
1459 __attribute__((unused
))
1460 static Bool
const_True ( Addr guest_addr
)
1465 /* --------------- main translation function --------------- */
1467 /* Note: see comments at top of m_redir.c for the Big Picture on how
1468 redirections are managed. */
1472 /* normal translation, redir neither requested nor inhibited */
1474 /* redir translation, function-wrap (set _NRADDR) style */
1476 /* redir translation, replacement (don't set _NRADDR) style */
1478 /* a translation in which redir is specifically disallowed */
1483 /* Translate the basic block beginning at NRADDR, and add it to the
1484 translation cache & translation table. Unless
1485 DEBUGGING_TRANSLATION is true, in which case the call is being done
1486 for debugging purposes, so (a) throw away the translation once it
1487 is made, and (b) produce a load of debugging output. If
1488 ALLOW_REDIRECTION is False, do not attempt redirection of NRADDR,
1489 and also, put the resulting translation into the no-redirect tt/tc
1490 instead of the normal one.
1492 TID is the identity of the thread requesting this translation.
1495 Bool
VG_(translate
) ( ThreadId tid
,
1497 Bool debugging_translation
,
1498 Int debugging_verbosity
,
1500 Bool allow_redirection
)
1504 Int tmpbuf_used
, verbosity
, i
;
1505 Bool (*preamble_fn
)(void*,IRSB
*);
1507 VexArchInfo vex_archinfo
;
1508 VexAbiInfo vex_abiinfo
;
1509 VexGuestExtents vge
;
1510 VexTranslateArgs vta
;
1511 VexTranslateResult tres
;
1512 VgCallbackClosure closure
;
1514 /* Make sure Vex is initialised right. */
1516 static Bool vex_init_done
= False
;
1518 if (!vex_init_done
) {
1519 LibVEX_Init ( &failure_exit
, &log_bytes
,
1520 1, /* debug_paranoia */
1521 &VG_(clo_vex_control
) );
1522 vex_init_done
= True
;
1525 /* Establish the translation kind and actual guest address to
1526 start from. Sets (addr,kind). */
1527 if (allow_redirection
) {
1529 Addr tmp
= VG_(redir_do_lookup
)( nraddr
, &isWrap
);
1530 if (tmp
== nraddr
) {
1531 /* no redirection found */
1535 /* found a redirect */
1537 kind
= isWrap
? T_Redir_Wrap
: T_Redir_Replace
;
1544 /* Established: (nraddr, addr, kind) */
1546 /* Printing redirection info. */
1548 if ((kind
== T_Redir_Wrap
|| kind
== T_Redir_Replace
)
1549 && (VG_(clo_verbosity
) >= 2 || VG_(clo_trace_redir
))) {
1553 const DiEpoch ep
= VG_(current_DiEpoch
)();
1555 /* Try also to get the soname (not the filename) of the "from"
1556 object. This makes it much easier to debug redirection
1558 const HChar
* nraddr_soname
= "???";
1559 DebugInfo
* nraddr_di
= VG_(find_DebugInfo
)(ep
, nraddr
);
1561 const HChar
* t
= VG_(DebugInfo_get_soname
)(nraddr_di
);
1566 ok
= VG_(get_fnname_w_offset
)(ep
, nraddr
, &buf
);
1567 if (!ok
) buf
= "???";
1569 HChar name1
[VG_(strlen
)(buf
) + 1];
1570 VG_(strcpy
)(name1
, buf
);
1571 ok
= VG_(get_fnname_w_offset
)(ep
, addr
, &name2
);
1572 if (!ok
) name2
= "???";
1574 VG_(message
)(Vg_DebugMsg
,
1575 "REDIR: 0x%lx (%s:%s) redirected to 0x%lx (%s)\n",
1576 nraddr
, nraddr_soname
, name1
,
1580 if (!debugging_translation
)
1581 VG_TRACK( pre_mem_read
, Vg_CoreTranslate
,
1582 tid
, "(translator)", addr
, 1 );
1584 /* If doing any code printing, print a basic block start marker */
1585 if (VG_(clo_trace_flags
) || debugging_translation
) {
1586 const HChar
* objname
= "UNKNOWN_OBJECT";
1588 const DiEpoch ep
= VG_(current_DiEpoch
)();
1589 DebugInfo
* di
= VG_(find_DebugInfo
)( ep
, addr
);
1591 objname
= VG_(DebugInfo_get_filename
)(di
);
1592 objoff
= addr
- VG_(DebugInfo_get_text_bias
)(di
);
1596 const HChar
*fnname
;
1597 Bool ok
= VG_(get_fnname_w_offset
)(ep
, addr
, &fnname
);
1598 if (!ok
) fnname
= "UNKNOWN_FUNCTION";
1600 "==== SB %u (evchecks %llu) [tid %u] 0x%lx %s %s%c0x%lx\n",
1601 VG_(get_bbs_translated
)(), bbs_done
, tid
, addr
,
1602 fnname
, objname
, objoff
>= 0 ? '+' : '-',
1603 (UWord
)(objoff
>= 0 ? objoff
: -objoff
)
1607 /* Are we allowed to translate here? */
1609 { /* BEGIN new scope specially for 'seg' */
1610 NSegment
const* seg
= VG_(am_find_nsegment
)(addr
);
1612 if ( (!translations_allowable_from_seg(seg
, addr
))
1613 || addr
== TRANSTAB_BOGUS_GUEST_ADDR
) {
1614 if (VG_(clo_trace_signals
))
1615 VG_(message
)(Vg_DebugMsg
, "translations not allowed here (0x%lx)"
1616 " - throwing SEGV\n", addr
);
1617 /* U R busted, sonny. Place your hands on your head and step
1618 away from the orig_addr. */
1619 /* Code address is bad - deliver a signal instead */
1621 /* There's some kind of segment at the requested place, but we
1622 aren't allowed to execute code here. */
1623 if (debugging_translation
)
1624 VG_(printf
)("translations not allowed here (segment not executable)"
1627 VG_(synth_fault_perms
)(tid
, addr
);
1629 /* There is no segment at all; we are attempting to execute in
1630 the middle of nowhere. */
1631 if (debugging_translation
)
1632 VG_(printf
)("translations not allowed here (no segment)"
1635 VG_(synth_fault_mapping
)(tid
, addr
);
1640 /* True if a debug trans., or if bit N set in VG_(clo_trace_codegen). */
1642 if (debugging_translation
) {
1643 verbosity
= debugging_verbosity
;
1646 if ( (VG_(clo_trace_flags
) > 0
1647 && VG_(get_bbs_translated
)() <= VG_(clo_trace_notabove
)
1648 && VG_(get_bbs_translated
)() >= VG_(clo_trace_notbelow
) )) {
1649 verbosity
= VG_(clo_trace_flags
);
1652 /* Figure out which preamble-mangling callback to send. */
1654 if (kind
== T_Redir_Replace
)
1655 preamble_fn
= mk_preamble__set_NRADDR_to_zero
;
1657 if (kind
== T_Redir_Wrap
)
1658 preamble_fn
= mk_preamble__set_NRADDR_to_nraddr
;
1660 /* LE we setup the LR */
1661 # if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
1662 if (nraddr
== (Addr
)&VG_(ppctoc_magic_redirect_return_stub
)) {
1663 /* If entering the special return stub, this means a wrapped or
1664 redirected function is returning. Make this translation one
1665 which restores R2 and LR from the thread's hidden redir
1666 stack, and branch to the (restored) link register, thereby
1667 really causing the function to return. */
1668 vg_assert(kind
== T_Normal
);
1669 vg_assert(nraddr
== addr
);
1670 preamble_fn
= mk_preamble__ppctoc_magic_return_stub
;
1674 /* ------ Actually do the translation. ------ */
1675 vg_assert2(VG_(tdict
).tool_instrument
,
1676 "you forgot to set VgToolInterface function 'tool_instrument'");
1678 /* Get the CPU info established at startup. */
1679 VG_(machine_get_VexArchInfo
)( &vex_arch
, &vex_archinfo
);
1681 /* Set up 'abiinfo' structure with stuff Vex needs to know about
1682 the guest and host ABIs. */
1684 LibVEX_default_VexAbiInfo( &vex_abiinfo
);
1685 vex_abiinfo
.guest_stack_redzone_size
= VG_STACK_REDZONE_SZB
;
1687 # if defined(VGP_amd64_linux)
1688 vex_abiinfo
.guest_amd64_assume_fs_is_const
= True
;
1689 vex_abiinfo
.guest_amd64_assume_gs_is_const
= True
;
1691 # if defined(VGP_amd64_freebsd)
1692 vex_abiinfo
.guest_amd64_assume_fs_is_const
= True
;
1693 vex_abiinfo
.guest_amd64_sigbus_on_misalign
= True
;
1695 # if defined(VGP_amd64_darwin)
1696 vex_abiinfo
.guest_amd64_assume_gs_is_const
= True
;
1699 # if defined(VGP_amd64_solaris)
1700 vex_abiinfo
.guest_amd64_assume_fs_is_const
= True
;
1703 # if defined(VGP_ppc32_linux)
1704 vex_abiinfo
.guest_ppc_zap_RZ_at_blr
= False
;
1705 vex_abiinfo
.guest_ppc_zap_RZ_at_bl
= NULL
;
1708 # if defined(VGP_ppc64be_linux)
1709 vex_abiinfo
.guest_ppc_zap_RZ_at_blr
= True
;
1710 vex_abiinfo
.guest_ppc_zap_RZ_at_bl
= const_True
;
1711 vex_abiinfo
.host_ppc_calls_use_fndescrs
= True
;
1714 # if defined(VGP_ppc64le_linux)
1715 vex_abiinfo
.guest_ppc_zap_RZ_at_blr
= True
;
1716 vex_abiinfo
.guest_ppc_zap_RZ_at_bl
= const_True
;
1717 vex_abiinfo
.host_ppc_calls_use_fndescrs
= False
;
1720 # if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
1721 ThreadArchState
* arch
= &VG_(threads
)[tid
].arch
;
1722 vex_abiinfo
.guest_mips_fp_mode
=
1723 !!(arch
->vex
.guest_CP0_status
& MIPS_CP0_STATUS_FR
);
1724 # if defined(VGP_mips32_linux)
1725 vex_abiinfo
.guest_mips_fp_mode
|=
1726 (!!(arch
->vex
.guest_CP0_Config5
& MIPS_CONF5_FRE
)) << 1;
1728 /* Compute guest__use_fallback_LLSC, overiding any settings of
1729 VG_(clo_fallback_llsc) that we know would cause the guest to
1731 if (VEX_MIPS_COMP_ID(vex_archinfo
.hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
1732 /* We must use the fallback scheme. */
1733 vex_abiinfo
.guest__use_fallback_LLSC
= True
;
1735 vex_abiinfo
.guest__use_fallback_LLSC
1736 = SimHintiS(SimHint_fallback_llsc
, VG_(clo_sim_hints
));
1740 #if defined(VGP_nanomips_linux)
1741 vex_abiinfo
.guest__use_fallback_LLSC
1742 = SimHintiS(SimHint_fallback_llsc
, VG_(clo_sim_hints
));
1745 # if defined(VGP_arm64_linux)
1746 vex_abiinfo
.guest__use_fallback_LLSC
1747 = /* The user asked explicitly */
1748 SimHintiS(SimHint_fallback_llsc
, VG_(clo_sim_hints
))
1749 || /* we autodetected that it is necessary */
1750 vex_archinfo
.arm64_requires_fallback_LLSC
;
1753 /* Set up closure args. */
1755 closure
.nraddr
= nraddr
;
1756 closure
.readdr
= addr
;
1758 /* Set up args for LibVEX_Translate. */
1759 vta
.arch_guest
= vex_arch
;
1760 vta
.archinfo_guest
= vex_archinfo
;
1761 vta
.arch_host
= vex_arch
;
1762 vta
.archinfo_host
= vex_archinfo
;
1763 vta
.abiinfo_both
= vex_abiinfo
;
1764 vta
.callback_opaque
= (void*)&closure
;
1765 vta
.guest_bytes
= (UChar
*)addr
;
1766 vta
.guest_bytes_addr
= addr
;
1767 vta
.chase_into_ok
= chase_into_ok
;
1768 vta
.guest_extents
= &vge
;
1769 vta
.host_bytes
= tmpbuf
;
1770 vta
.host_bytes_size
= N_TMPBUF
;
1771 vta
.host_bytes_used
= &tmpbuf_used
;
1772 { /* At this point we have to reconcile Vex's view of the
1773 instrumentation callback - which takes a void* first argument
1774 - with Valgrind's view, in which the first arg is a
1775 VgCallbackClosure*. Hence the following longwinded casts.
1776 They are entirely legal but longwinded so as to maximise the
1777 chance of the C typechecker picking up any type snafus. */
1778 IRSB
*(*f
)(VgCallbackClosure
*,
1779 IRSB
*,const VexGuestLayout
*,const VexGuestExtents
*,
1780 const VexArchInfo
*,IRType
,IRType
)
1781 = VG_(clo_vgdb
) != Vg_VgdbNo
1782 ? tool_instrument_then_gdbserver_if_needed
1783 : VG_(tdict
).tool_instrument
;
1785 IRSB
*,const VexGuestLayout
*,const VexGuestExtents
*,
1786 const VexArchInfo
*,IRType
,IRType
) = (__typeof__(g
)) f
;
1787 vta
.instrument1
= g
;
1789 /* No need for type kludgery here. */
1790 vta
.instrument2
= need_to_handle_SP_assignment()
1793 vta
.finaltidy
= VG_(needs
).final_IR_tidy_pass
1794 ? VG_(tdict
).tool_final_IR_tidy_pass
1796 vta
.needs_self_check
= needs_self_check
;
1797 vta
.preamble_function
= preamble_fn
;
1798 vta
.traceflags
= verbosity
;
1799 vta
.sigill_diag
= VG_(clo_sigill_diag
);
1800 vta
.addProfInc
= VG_(clo_profyle_sbs
) && kind
!= T_NoRedir
;
1802 /* Set up the dispatch continuation-point info. If this is a
1803 no-redir translation then it cannot be chained, and the chain-me
1804 points are set to NULL to indicate that. The indir point must
1805 also be NULL, since we can't allow this translation to do an
1806 indir transfer -- that would take it back into the main
1807 translation cache too.
1809 All this is because no-redir translations live outside the main
1810 translation cache (in a secondary one) and chaining them would
1811 involve more adminstrative complexity that isn't worth the
1812 hassle, because we don't expect them to get used often. So
1814 if (allow_redirection
) {
1815 vta
.disp_cp_chain_me_to_slowEP
1816 = VG_(fnptr_to_fnentry
)( &VG_(disp_cp_chain_me_to_slowEP
) );
1817 vta
.disp_cp_chain_me_to_fastEP
1818 = VG_(fnptr_to_fnentry
)( &VG_(disp_cp_chain_me_to_fastEP
) );
1820 = VG_(fnptr_to_fnentry
)( &VG_(disp_cp_xindir
) );
1822 vta
.disp_cp_chain_me_to_slowEP
= NULL
;
1823 vta
.disp_cp_chain_me_to_fastEP
= NULL
;
1824 vta
.disp_cp_xindir
= NULL
;
1826 /* This doesn't involve chaining and so is always allowable. */
1827 vta
.disp_cp_xassisted
1828 = VG_(fnptr_to_fnentry
)( &VG_(disp_cp_xassisted
) );
1830 /* Sheesh. Finally, actually _do_ the translation! */
1831 tres
= LibVEX_Translate ( &vta
);
1833 vg_assert(tres
.status
== VexTransOK
);
1834 vg_assert(tres
.n_sc_extents
>= 0 && tres
.n_sc_extents
<= 3);
1835 vg_assert(tmpbuf_used
<= N_TMPBUF
);
1836 vg_assert(tmpbuf_used
> 0);
1838 n_TRACE_total_constructed
+= 1;
1839 n_TRACE_total_guest_insns
+= tres
.n_guest_instrs
;
1840 n_TRACE_total_uncond_branches_followed
+= tres
.n_uncond_in_trace
;
1841 n_TRACE_total_cond_branches_followed
+= tres
.n_cond_in_trace
;
1842 } /* END new scope specially for 'seg' */
1844 /* Tell aspacem of all segments that have had translations taken
1846 for (i
= 0; i
< vge
.n_used
; i
++) {
1847 VG_(am_set_segment_hasT
)( vge
.base
[i
] );
1850 /* Copy data at trans_addr into the translation cache. */
1851 vg_assert(tmpbuf_used
> 0 && tmpbuf_used
< 65536);
1853 // If debugging, don't do anything with the translated block; we
1854 // only did this for the debugging output produced along the way.
1855 if (!debugging_translation
) {
1857 if (kind
!= T_NoRedir
) {
1858 // Put it into the normal TT/TC structures. This is the
1861 // Note that we use nraddr (the non-redirected address), not
1862 // addr, which might have been changed by the redirection
1863 VG_(add_to_transtab
)( &vge
,
1867 tres
.n_sc_extents
> 0,
1869 tres
.n_guest_instrs
);
1871 vg_assert(tres
.offs_profInc
== -1); /* -1 == unset */
1872 VG_(add_to_unredir_transtab
)( &vge
,
1882 /*--------------------------------------------------------------------*/
1884 /*--------------------------------------------------------------------*/