2 /*--------------------------------------------------------------------*/
3 /*--- Interface to LibVEX_Translate, and the SP-update pass ---*/
4 /*--- m_translate.c ---*/
5 /*--------------------------------------------------------------------*/
8 This file is part of Valgrind, a dynamic binary instrumentation
11 Copyright (C) 2000-2017 Julian Seward
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 The GNU General Public License is contained in the file COPYING.
32 #include "pub_core_basics.h"
33 #include "pub_core_vki.h"
34 #include "pub_core_aspacemgr.h"
36 #include "pub_core_machine.h" // VG_(fnptr_to_fnentry)
38 // VG_(machine_get_VexArchInfo)
39 #include "pub_core_libcbase.h"
40 #include "pub_core_libcassert.h"
41 #include "pub_core_libcprint.h"
42 #include "pub_core_options.h"
44 #include "pub_core_debuginfo.h" // VG_(get_fnname_w_offset)
45 #include "pub_core_redir.h" // VG_(redir_do_lookup)
47 #include "pub_core_signals.h" // VG_(synth_fault_{perms,mapping}
48 #include "pub_core_stacks.h" // VG_(unknown_SP_update*)()
49 #include "pub_core_tooliface.h" // VG_(tdict)
51 #include "pub_core_translate.h"
52 #include "pub_core_transtab.h"
53 #include "pub_core_dispatch.h" // VG_(run_innerloop__dispatch_{un}profiled)
54 // VG_(run_a_noredir_translation__return_point)
56 #include "pub_core_threadstate.h" // VexGuestArchState
57 #include "pub_core_trampoline.h" // VG_(ppctoc_magic_redirect_return_stub)
59 #include "pub_core_execontext.h" // VG_(make_depth_1_ExeContext_from_Addr)
61 #include "pub_core_gdbserver.h" // VG_(instrument_for_gdbserver_if_needed)
63 #include "libvex_emnote.h" // For PPC, EmWarn_PPC64_redir_underflow
65 /*------------------------------------------------------------*/
67 /*------------------------------------------------------------*/
69 static ULong n_SP_updates_new_fast
= 0;
70 static ULong n_SP_updates_new_generic_known
= 0;
71 static ULong n_SP_updates_die_fast
= 0;
72 static ULong n_SP_updates_die_generic_known
= 0;
73 static ULong n_SP_updates_generic_unknown
= 0;
75 static ULong n_PX_VexRegUpdSpAtMemAccess
= 0;
76 static ULong n_PX_VexRegUpdUnwindregsAtMemAccess
= 0;
77 static ULong n_PX_VexRegUpdAllregsAtMemAccess
= 0;
78 static ULong n_PX_VexRegUpdAllregsAtEachInsn
= 0;
80 void VG_(print_translation_stats
) ( void )
82 UInt n_SP_updates
= n_SP_updates_new_fast
+ n_SP_updates_new_generic_known
83 + n_SP_updates_die_fast
+ n_SP_updates_die_generic_known
84 + n_SP_updates_generic_unknown
;
85 if (n_SP_updates
== 0) {
86 VG_(message
)(Vg_DebugMsg
, "translate: no SP updates identified\n");
88 VG_(message
)(Vg_DebugMsg
,
89 "translate: fast new/die SP updates identified: "
90 "%'llu (%3.1f%%)/%'llu (%3.1f%%)\n",
91 n_SP_updates_new_fast
, n_SP_updates_new_fast
* 100.0 / n_SP_updates
,
92 n_SP_updates_die_fast
, n_SP_updates_die_fast
* 100.0 / n_SP_updates
);
94 VG_(message
)(Vg_DebugMsg
,
95 "translate: generic_known new/die SP updates identified: "
96 "%'llu (%3.1f%%)/%'llu (%3.1f%%)\n",
97 n_SP_updates_new_generic_known
,
98 n_SP_updates_new_generic_known
* 100.0 / n_SP_updates
,
99 n_SP_updates_die_generic_known
,
100 n_SP_updates_die_generic_known
* 100.0 / n_SP_updates
);
102 VG_(message
)(Vg_DebugMsg
,
103 "translate: generic_unknown SP updates identified: %'llu (%3.1f%%)\n",
104 n_SP_updates_generic_unknown
,
105 n_SP_updates_generic_unknown
* 100.0 / n_SP_updates
);
110 "translate: PX: SPonly %'llu, UnwRegs %'llu,"
111 " AllRegs %'llu, AllRegsAllInsns %'llu\n",
112 n_PX_VexRegUpdSpAtMemAccess
, n_PX_VexRegUpdUnwindregsAtMemAccess
,
113 n_PX_VexRegUpdAllregsAtMemAccess
, n_PX_VexRegUpdAllregsAtEachInsn
);
116 /*------------------------------------------------------------*/
117 /*--- %SP-update pass ---*/
118 /*------------------------------------------------------------*/
120 static Bool
need_to_handle_SP_assignment(void)
122 return VG_(tdict
).any_new_mem_stack
|| VG_(tdict
).any_die_mem_stack
;
125 // - The SP aliases are held in an array which is used as a circular buffer.
126 // This misses very few constant updates of SP (ie. < 0.1%) while using a
127 // small, constant structure that will also never fill up and cause
128 // execution to abort.
129 // - Unused slots have a .temp value of 'IRTemp_INVALID'.
130 // - 'next_SP_alias_slot' is the index where the next alias will be stored.
131 // - If the buffer fills, we circle around and start over-writing
132 // non-IRTemp_INVALID values. This is rare, and the overwriting of a
133 //   value that would have subsequently been used is even rarer.
134 // - Every slot below next_SP_alias_slot holds a non-IRTemp_INVALID value.
135 // The rest either all won't (if we haven't yet circled around) or all
136 // will (if we have circled around).
145 // With 32 slots the buffer fills very rarely -- eg. once in a run of GCC.
146 // And I've tested with smaller values and the wrap-around case works ok.
148 static SP_Alias SP_aliases
[N_ALIASES
];
149 static Int next_SP_alias_slot
= 0;
151 static void clear_SP_aliases(void)
154 for (i
= 0; i
< N_ALIASES
; i
++) {
155 SP_aliases
[i
].temp
= IRTemp_INVALID
;
156 SP_aliases
[i
].delta
= 0;
158 next_SP_alias_slot
= 0;
161 static void add_SP_alias(IRTemp temp
, Long delta
)
163 vg_assert(temp
!= IRTemp_INVALID
);
164 SP_aliases
[ next_SP_alias_slot
].temp
= temp
;
165 SP_aliases
[ next_SP_alias_slot
].delta
= delta
;
166 next_SP_alias_slot
++;
167 if (N_ALIASES
== next_SP_alias_slot
) next_SP_alias_slot
= 0;
170 static Bool
get_SP_delta(IRTemp temp
, Long
* delta
)
172 Int i
; // i must be signed!
173 vg_assert(IRTemp_INVALID
!= temp
);
174 // Search backwards between current buffer position and the start.
175 for (i
= next_SP_alias_slot
-1; i
>= 0; i
--) {
176 if (temp
== SP_aliases
[i
].temp
) {
177 *delta
= SP_aliases
[i
].delta
;
181 // Search backwards between the end and the current buffer position.
182 for (i
= N_ALIASES
-1; i
>= next_SP_alias_slot
; i
--) {
183 if (temp
== SP_aliases
[i
].temp
) {
184 *delta
= SP_aliases
[i
].delta
;
191 static void update_SP_aliases(Long delta
)
194 for (i
= 0; i
< N_ALIASES
; i
++) {
195 if (SP_aliases
[i
].temp
== IRTemp_INVALID
) {
198 SP_aliases
[i
].delta
+= delta
;
202 /* Given a guest IP, get an origin tag for a 1-element stack trace,
203 and wrap it up in an IR atom that can be passed as the origin-tag
204 value for a stack-adjustment helper function. */
205 static IRExpr
* mk_ecu_Expr ( Addr guest_IP
)
209 = VG_(make_depth_1_ExeContext_from_Addr
)( guest_IP
);
211 ecu
= VG_(get_ECU_from_ExeContext
)( ec
);
212 vg_assert(VG_(is_plausible_ECU
)(ecu
));
213 /* This is always safe to do, since ecu is only 32 bits, and
214 HWord is 32 or 64. */
215 return mkIRExpr_HWord( (HWord
)ecu
);
218 /* When gdbserver is activated, the translation of a block must
219 first be done by the tool function, then followed by a pass
220 which (if needed) instruments the code for gdbserver.
223 IRSB
* tool_instrument_then_gdbserver_if_needed ( VgCallbackClosure
* closureV
,
225 const VexGuestLayout
* layout
,
226 const VexGuestExtents
* vge
,
227 const VexArchInfo
* vai
,
231 return VG_(instrument_for_gdbserver_if_needed
)
232 (VG_(tdict
).tool_instrument (closureV
,
245 /* For tools that want to know about SP changes, this pass adds
246 in the appropriate hooks. We have to do it after the tool's
247 instrumentation, so the tool doesn't have to worry about the C calls
248 it adds in, and we must do it before register allocation because
249 spilled temps make it much harder to work out the SP deltas.
250    This is done with Vex's "second instrumentation" pass.
252 Basically, we look for GET(SP)/PUT(SP) pairs and track constant
253 increments/decrements of SP between them. (This requires tracking one or
254 more "aliases", which are not exact aliases but instead are tempregs
255 whose value is equal to the SP's plus or minus a known constant.)
256 If all the changes to SP leading up to a PUT(SP) are by known, small
257 constants, we can do a specific call to eg. new_mem_stack_4, otherwise
258 we fall back to the case that handles an unknown SP change.
260 There is some extra complexity to deal correctly with updates to
261 only parts of SP. Bizarre, but it has been known to happen.
264 IRSB
* vg_SP_update_pass ( void* closureV
,
266 const VexGuestLayout
* layout
,
267 const VexGuestExtents
* vge
,
268 const VexArchInfo
* vai
,
272 Int i
, j
, k
, minoff_ST
, maxoff_ST
, sizeof_SP
, offset_SP
;
273 Int first_SP
, last_SP
, first_Put
, last_Put
;
281 /* Set up stuff for tracking the guest IP */
282 Bool curr_IP_known
= False
;
286 IRSB
* bb
= emptyIRSB();
287 bb
->tyenv
= deepCopyIRTypeEnv(sb_in
->tyenv
);
288 bb
->next
= deepCopyIRExpr(sb_in
->next
);
289 bb
->jumpkind
= sb_in
->jumpkind
;
290 bb
->offsIP
= sb_in
->offsIP
;
294 sizeof_SP
= layout
->sizeof_SP
;
295 offset_SP
= layout
->offset_SP
;
296 typeof_SP
= sizeof_SP
==4 ? Ity_I32
: Ity_I64
;
297 vg_assert(sizeof_SP
== 4 || sizeof_SP
== 8);
299 /* --- Start of #defines --- */
301 # define IS_ADD(op) (sizeof_SP==4 ? ((op)==Iop_Add32) : ((op)==Iop_Add64))
302 # define IS_SUB(op) (sizeof_SP==4 ? ((op)==Iop_Sub32) : ((op)==Iop_Sub64))
304 # define IS_ADD_OR_SUB(op) (IS_ADD(op) || IS_SUB(op))
306 # define GET_CONST(con) \
307 (sizeof_SP==4 ? (Long)(Int)(con->Ico.U32) \
308 : (Long)(con->Ico.U64))
310 # define DO_NEW(syze, tmpp) \
312 Bool vanilla, w_ecu; \
313 vg_assert(curr_IP_known); \
314 vanilla = NULL != VG_(tdict).track_new_mem_stack_##syze; \
315 w_ecu = NULL != VG_(tdict).track_new_mem_stack_##syze##_w_ECU; \
316 vg_assert(!(vanilla && w_ecu)); /* can't have both */ \
317 if (VG_(tdict).any_new_mem_stack \
318 && !vanilla && !w_ecu) { \
319 n_SP_updates_new_generic_known++; \
323 if (VG_(tdict).any_new_mem_stack) { \
324 /* I don't know if it's really necessary to say that the */ \
325 /* call reads the stack pointer. But anyway, we do. */ \
327 dcall = unsafeIRDirty_0_N( \
329 "track_new_mem_stack_" #syze "_w_ECU", \
330 VG_(fnptr_to_fnentry)( \
331 VG_(tdict).track_new_mem_stack_##syze##_w_ECU ), \
332 mkIRExprVec_2(IRExpr_RdTmp(tmpp), \
333 mk_ecu_Expr(curr_IP)) \
336 dcall = unsafeIRDirty_0_N( \
338 "track_new_mem_stack_" #syze , \
339 VG_(fnptr_to_fnentry)( \
340 VG_(tdict).track_new_mem_stack_##syze ), \
341 mkIRExprVec_1(IRExpr_RdTmp(tmpp)) \
344 dcall->nFxState = 1; \
345 dcall->fxState[0].fx = Ifx_Read; \
346 dcall->fxState[0].offset = layout->offset_SP; \
347 dcall->fxState[0].size = layout->sizeof_SP; \
348 dcall->fxState[0].nRepeats = 0; \
349 dcall->fxState[0].repeatLen = 0; \
351 addStmtToIRSB( bb, IRStmt_Dirty(dcall) ); \
354 vg_assert(syze > 0); \
355 update_SP_aliases(syze); \
357 n_SP_updates_new_fast++; \
361 # define DO_DIE(syze, tmpp) \
363 if (VG_(tdict).any_die_mem_stack \
364 && !VG_(tdict).track_die_mem_stack_##syze) { \
365 n_SP_updates_die_generic_known++; \
369 if (VG_(tdict).any_die_mem_stack) { \
370 /* I don't know if it's really necessary to say that the */ \
371 /* call reads the stack pointer. But anyway, we do. */ \
372 dcall = unsafeIRDirty_0_N( \
374 "track_die_mem_stack_" #syze, \
375 VG_(fnptr_to_fnentry)( \
376 VG_(tdict).track_die_mem_stack_##syze ), \
377 mkIRExprVec_1(IRExpr_RdTmp(tmpp)) \
379 dcall->nFxState = 1; \
380 dcall->fxState[0].fx = Ifx_Read; \
381 dcall->fxState[0].offset = layout->offset_SP; \
382 dcall->fxState[0].size = layout->sizeof_SP; \
383 dcall->fxState[0].nRepeats = 0; \
384 dcall->fxState[0].repeatLen = 0; \
386 addStmtToIRSB( bb, IRStmt_Dirty(dcall) ); \
389 vg_assert(syze > 0); \
390 update_SP_aliases(-(syze)); \
392 n_SP_updates_die_fast++; \
396 /* --- End of #defines --- */
400 for (i
= 0; i
< sb_in
->stmts_used
; i
++) {
402 st
= sb_in
->stmts
[i
];
404 if (st
->tag
== Ist_IMark
) {
405 curr_IP_known
= True
;
406 curr_IP
= st
->Ist
.IMark
.addr
;
409 /* t = Get(sp): curr = t, delta = 0 */
410 if (st
->tag
!= Ist_WrTmp
) goto case2
;
411 e
= st
->Ist
.WrTmp
.data
;
412 if (e
->tag
!= Iex_Get
) goto case2
;
413 if (e
->Iex
.Get
.offset
!= offset_SP
) goto case2
;
414 if (e
->Iex
.Get
.ty
!= typeof_SP
) goto case2
;
415 vg_assert( typeOfIRTemp(bb
->tyenv
, st
->Ist
.WrTmp
.tmp
) == typeof_SP
);
416 add_SP_alias(st
->Ist
.WrTmp
.tmp
, 0);
417 addStmtToIRSB( bb
, st
);
421 /* t' = curr +/- const: curr = t', delta +=/-= const */
422 if (st
->tag
!= Ist_WrTmp
) goto case3
;
423 e
= st
->Ist
.WrTmp
.data
;
424 if (e
->tag
!= Iex_Binop
) goto case3
;
425 if (e
->Iex
.Binop
.arg1
->tag
!= Iex_RdTmp
) goto case3
;
426 if (!get_SP_delta(e
->Iex
.Binop
.arg1
->Iex
.RdTmp
.tmp
, &delta
)) goto case3
;
427 if (e
->Iex
.Binop
.arg2
->tag
!= Iex_Const
) goto case3
;
428 if (!IS_ADD_OR_SUB(e
->Iex
.Binop
.op
)) goto case3
;
429 con
= GET_CONST(e
->Iex
.Binop
.arg2
->Iex
.Const
.con
);
430 vg_assert( typeOfIRTemp(bb
->tyenv
, st
->Ist
.WrTmp
.tmp
) == typeof_SP
);
431 if (IS_ADD(e
->Iex
.Binop
.op
)) {
432 add_SP_alias(st
->Ist
.WrTmp
.tmp
, delta
+ con
);
434 add_SP_alias(st
->Ist
.WrTmp
.tmp
, delta
- con
);
436 addStmtToIRSB( bb
, st
);
440 /* t' = curr: curr = t' */
441 if (st
->tag
!= Ist_WrTmp
) goto case4
;
442 e
= st
->Ist
.WrTmp
.data
;
443 if (e
->tag
!= Iex_RdTmp
) goto case4
;
444 if (!get_SP_delta(e
->Iex
.RdTmp
.tmp
, &delta
)) goto case4
;
445 vg_assert( typeOfIRTemp(bb
->tyenv
, st
->Ist
.WrTmp
.tmp
) == typeof_SP
);
446 add_SP_alias(st
->Ist
.WrTmp
.tmp
, delta
);
447 addStmtToIRSB( bb
, st
);
452 /* More generally, we must correctly handle a Put which writes
453 any part of SP, not just the case where all of SP is
455 if (st
->tag
!= Ist_Put
) goto case5
;
456 first_SP
= offset_SP
;
457 last_SP
= first_SP
+ sizeof_SP
- 1;
458 first_Put
= st
->Ist
.Put
.offset
;
460 + sizeofIRType( typeOfIRExpr( bb
->tyenv
, st
->Ist
.Put
.data
))
462 vg_assert(first_SP
<= last_SP
);
463 vg_assert(first_Put
<= last_Put
);
465 if (last_Put
< first_SP
|| last_SP
< first_Put
)
466 goto case5
; /* no overlap */
468 if (st
->Ist
.Put
.data
->tag
== Iex_RdTmp
469 && get_SP_delta(st
->Ist
.Put
.data
->Iex
.RdTmp
.tmp
, &delta
)) {
470 IRTemp tttmp
= st
->Ist
.Put
.data
->Iex
.RdTmp
.tmp
;
471 /* Why should the following assertion hold? Because any
472 alias added by put_SP_alias must be of a temporary which
473 has the same type as typeof_SP, and whose value is a Get
474 at exactly offset_SP of size typeof_SP. Each call to
475 put_SP_alias is immediately preceded by an assertion that
476 we are putting in a binding for a correctly-typed
478 vg_assert( typeOfIRTemp(bb
->tyenv
, tttmp
) == typeof_SP
);
479 /* From the same type-and-offset-correctness argument, if
480 we found a useable alias, it must for an "exact" write of SP. */
481 vg_assert(first_SP
== first_Put
);
482 vg_assert(last_SP
== last_Put
);
484 case 0: addStmtToIRSB(bb
,st
); continue;
485 case 4: DO_DIE( 4, tttmp
); addStmtToIRSB(bb
,st
); continue;
486 case -4: DO_NEW( 4, tttmp
); addStmtToIRSB(bb
,st
); continue;
487 case 8: DO_DIE( 8, tttmp
); addStmtToIRSB(bb
,st
); continue;
488 case -8: DO_NEW( 8, tttmp
); addStmtToIRSB(bb
,st
); continue;
489 case 12: DO_DIE( 12, tttmp
); addStmtToIRSB(bb
,st
); continue;
490 case -12: DO_NEW( 12, tttmp
); addStmtToIRSB(bb
,st
); continue;
491 case 16: DO_DIE( 16, tttmp
); addStmtToIRSB(bb
,st
); continue;
492 case -16: DO_NEW( 16, tttmp
); addStmtToIRSB(bb
,st
); continue;
493 case 32: DO_DIE( 32, tttmp
); addStmtToIRSB(bb
,st
); continue;
494 case -32: DO_NEW( 32, tttmp
); addStmtToIRSB(bb
,st
); continue;
495 case 112: DO_DIE( 112, tttmp
); addStmtToIRSB(bb
,st
); continue;
496 case -112: DO_NEW( 112, tttmp
); addStmtToIRSB(bb
,st
); continue;
497 case 128: DO_DIE( 128, tttmp
); addStmtToIRSB(bb
,st
); continue;
498 case -128: DO_NEW( 128, tttmp
); addStmtToIRSB(bb
,st
); continue;
499 case 144: DO_DIE( 144, tttmp
); addStmtToIRSB(bb
,st
); continue;
500 case -144: DO_NEW( 144, tttmp
); addStmtToIRSB(bb
,st
); continue;
501 case 160: DO_DIE( 160, tttmp
); addStmtToIRSB(bb
,st
); continue;
502 case -160: DO_NEW( 160, tttmp
); addStmtToIRSB(bb
,st
); continue;
505 n_SP_updates_die_generic_known
++;
506 if (VG_(tdict
).any_die_mem_stack
)
509 n_SP_updates_new_generic_known
++;
510 if (VG_(tdict
).any_new_mem_stack
)
513 /* No tracking for delta. Just add the original statement. */
514 addStmtToIRSB(bb
,st
); continue;
517 /* Deal with an unknown update to SP. We're here because
519 (1) the Put does not exactly cover SP; it is a partial update.
520 Highly unlikely, but has been known to happen for 16-bit
521 Windows apps running on Wine, doing 16-bit adjustments to
523 (2) the Put does exactly cover SP, but we are unable to
524 determine how the value relates to the old SP. In any
525 case, we cannot assume that the Put.data value is a tmp;
526 we must assume it can be anything allowed in flat IR (tmp
530 n_SP_updates_generic_unknown
++;
532 // Nb: if all is well, this generic case will typically be
533 // called something like every 1000th SP update. If it's more than
534 // that, the above code may be missing some cases.
536 /* Pass both the old and new SP values to this helper. Also,
537 pass an origin tag, even if it isn't needed. */
538 old_SP
= newIRTemp(bb
->tyenv
, typeof_SP
);
541 IRStmt_WrTmp( old_SP
, IRExpr_Get(offset_SP
, typeof_SP
) )
544 /* Now we know what the old value of SP is. But knowing the new
545 value is a bit tricky if there is a partial write. */
546 if (first_Put
== first_SP
&& last_Put
== last_SP
) {
547 /* The common case, an exact write to SP. So st->Ist.Put.data
548 does hold the new value; simple. */
549 vg_assert(curr_IP_known
);
550 if (NULL
!= VG_(tdict
).track_new_mem_stack_w_ECU
)
551 dcall
= unsafeIRDirty_0_N(
553 "VG_(unknown_SP_update_w_ECU)",
554 VG_(fnptr_to_fnentry
)( &VG_(unknown_SP_update_w_ECU
) ),
555 mkIRExprVec_3( IRExpr_RdTmp(old_SP
), st
->Ist
.Put
.data
,
556 mk_ecu_Expr(curr_IP
) )
559 dcall
= unsafeIRDirty_0_N(
561 "VG_(unknown_SP_update)",
562 VG_(fnptr_to_fnentry
)( &VG_(unknown_SP_update
) ),
563 mkIRExprVec_2( IRExpr_RdTmp(old_SP
), st
->Ist
.Put
.data
)
566 addStmtToIRSB( bb
, IRStmt_Dirty(dcall
) );
567 /* don't forget the original assignment */
568 addStmtToIRSB( bb
, st
);
570 /* We have a partial update to SP. We need to know what
571 the new SP will be, and hand that to the helper call,
572 but when the helper call happens, SP must hold the
573 value it had before the update. Tricky.
574 Therefore use the following kludge:
575 1. do the partial SP update (Put)
576 2. Get the new SP value into a tmp, new_SP
583 addStmtToIRSB( bb
, st
);
585 new_SP
= newIRTemp(bb
->tyenv
, typeof_SP
);
588 IRStmt_WrTmp( new_SP
, IRExpr_Get(offset_SP
, typeof_SP
) )
591 addStmtToIRSB( bb
, IRStmt_Put(offset_SP
, IRExpr_RdTmp(old_SP
) ));
593 vg_assert(curr_IP_known
);
594 if (NULL
!= VG_(tdict
).track_new_mem_stack_w_ECU
)
595 dcall
= unsafeIRDirty_0_N(
597 "VG_(unknown_SP_update_w_ECU)",
598 VG_(fnptr_to_fnentry
)( &VG_(unknown_SP_update_w_ECU
) ),
599 mkIRExprVec_3( IRExpr_RdTmp(old_SP
),
600 IRExpr_RdTmp(new_SP
),
601 mk_ecu_Expr(curr_IP
) )
604 dcall
= unsafeIRDirty_0_N(
606 "VG_(unknown_SP_update)",
607 VG_(fnptr_to_fnentry
)( &VG_(unknown_SP_update
) ),
608 mkIRExprVec_2( IRExpr_RdTmp(old_SP
),
609 IRExpr_RdTmp(new_SP
) )
611 addStmtToIRSB( bb
, IRStmt_Dirty(dcall
) );
613 addStmtToIRSB( bb
, IRStmt_Put(offset_SP
, IRExpr_RdTmp(new_SP
) ));
616 /* Forget what we already know. */
619 /* If this is a Put of a tmp that exactly updates SP,
620 start tracking aliases against this tmp. */
622 if (first_Put
== first_SP
&& last_Put
== last_SP
623 && st
->Ist
.Put
.data
->tag
== Iex_RdTmp
) {
624 vg_assert( typeOfIRTemp(bb
->tyenv
, st
->Ist
.Put
.data
->Iex
.RdTmp
.tmp
)
626 add_SP_alias(st
->Ist
.Put
.data
->Iex
.RdTmp
.tmp
, 0);
632 /* PutI or Dirty call which overlaps SP: complain. We can't
633 deal with SP changing in weird ways (well, we can, but not at
634 this time of night). */
635 if (st
->tag
== Ist_PutI
) {
636 descr
= st
->Ist
.PutI
.details
->descr
;
637 minoff_ST
= descr
->base
;
638 maxoff_ST
= descr
->base
639 + descr
->nElems
* sizeofIRType(descr
->elemTy
) - 1;
640 if (!(offset_SP
> maxoff_ST
641 || (offset_SP
+ sizeof_SP
- 1) < minoff_ST
))
644 if (st
->tag
== Ist_Dirty
) {
645 d
= st
->Ist
.Dirty
.details
;
646 for (j
= 0; j
< d
->nFxState
; j
++) {
647 if (d
->fxState
[j
].fx
== Ifx_Read
|| d
->fxState
[j
].fx
== Ifx_None
)
649 /* Enumerate the described state segments */
650 for (k
= 0; k
< 1 + d
->fxState
[j
].nRepeats
; k
++) {
651 minoff_ST
= d
->fxState
[j
].offset
+ k
* d
->fxState
[j
].repeatLen
;
652 maxoff_ST
= minoff_ST
+ d
->fxState
[j
].size
- 1;
653 if (!(offset_SP
> maxoff_ST
654 || (offset_SP
+ sizeof_SP
- 1) < minoff_ST
))
660 /* well, not interesting. Just copy and keep going. */
661 addStmtToIRSB( bb
, st
);
663 } /* for (i = 0; i < sb_in->stmts_used; i++) */
668 VG_(core_panic
)("vg_SP_update_pass: PutI or Dirty which overlaps SP");
678 /*------------------------------------------------------------*/
679 /*--- Main entry point for the JITter. ---*/
680 /*------------------------------------------------------------*/
682 /* Extra comments re self-checking translations and self-modifying
683 code. (JRS 14 Oct 05).
686 (1) no checking: all code assumed to be not self-modifying
687 (2) partial: known-problematic situations get a self-check
688 (3) full checking: all translations get a self-check
690 As currently implemented, the default is (2). (3) is always safe,
691 but very slow. (1) works mostly, but fails for gcc nested-function
692 code which uses trampolines on the stack; this situation is
693 detected and handled by (2).
697 A more robust and transparent solution, which is not currently
698 implemented, is a variant of (2): if a translation is made from an
699 area which aspacem says does not have 'w' permission, then it can
700 be non-self-checking. Otherwise, it needs a self-check.
702 This is complicated by Vex's basic-block chasing. If a self-check
703 is requested, then Vex will not chase over basic block boundaries
704 (it's too complex). However there is still a problem if it chases
705 from a non-'w' area into a 'w' area.
707 I think the right thing to do is:
709 - if a translation request starts in a 'w' area, ask for a
710 self-checking translation, and do not allow any chasing (make
711 chase_into_ok return False). Note that the latter is redundant
712 in the sense that Vex won't chase anyway in this situation.
714 - if a translation request starts in a non-'w' area, do not ask for
715 a self-checking translation. However, do not allow chasing (as
716 determined by chase_into_ok) to go into a 'w' area.
718 The result of this is that all code inside 'w' areas is self
721 To complete the trick, there is a caveat: we must watch the
722 client's mprotect calls. If pages are changed from non-'w' to 'w'
723 then we should throw away all translations which intersect the
724 affected area, so as to force them to be redone with self-checks.
728 The above outlines the conditions under which bb chasing is allowed
729 from a self-modifying-code point of view. There are other
730 situations pertaining to function redirection in which it is
731 necessary to disallow chasing, but those fall outside the scope of
736 /* Vex dumps the final code in here. Then we can copy it off
738 /* 60000: should agree with assertion in VG_(add_to_transtab) in
740 #define N_TMPBUF 60000
741 static UChar tmpbuf
[N_TMPBUF
];
744 /* Function pointers we must supply to LibVEX in order that it
745 can bomb out and emit messages under Valgrind's control. */
746 __attribute__ ((noreturn
))
748 void failure_exit ( void )
750 LibVEX_ShowAllocStats();
751 VG_(core_panic
)("LibVEX called failure_exit().");
755 void log_bytes ( const HChar
* bytes
, SizeT nbytes
)
759 for (; i
< nbytes
-3; i
+= 4)
760 VG_(printf
)("%c%c%c%c", bytes
[i
], bytes
[i
+1], bytes
[i
+2], bytes
[i
+3]);
761 for (; i
< nbytes
; i
++)
762 VG_(printf
)("%c", bytes
[i
]);
766 /* --------- Various helper functions for translation --------- */
768 /* Look for reasons to disallow making translations from the given
771 static Bool
translations_allowable_from_seg ( NSegment
const* seg
, Addr addr
)
773 # if defined(VGA_x86) || defined(VGA_s390x) || defined(VGA_mips32) \
774 || defined(VGA_mips64)
780 && (seg
->kind
== SkAnonC
|| seg
->kind
== SkFileC
|| seg
->kind
== SkShmC
)
782 || (seg
->hasR
&& (allowR
783 || VG_(has_gdbserver_breakpoint
) (addr
))));
784 /* If GDB/gdbsrv has inserted a breakpoint at addr, assume this is a valid
785 location to translate if seg is not executable but is readable.
786 This is needed for inferior function calls from GDB: GDB inserts a
787 breakpoint on the stack, and expects to regain control before the
788 breakpoint instruction at the breakpoint address is really
789 executed. For this, the breakpoint instruction must be translated
790 so as to have the call to gdbserver executed. */
794 /* Produce a bitmask stating which of the supplied extents needs a
795 self-check. See documentation of
796 VexTranslateArgs::needs_self_check for more details about the
797 return convention. */
799 static UInt
needs_self_check ( void* closureV
,
800 /*MAYBE_MOD*/VexRegisterUpdates
* pxControl
,
801 const VexGuestExtents
* vge
)
803 VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
806 vg_assert(vge
->n_used
>= 1 && vge
->n_used
<= 3);
809 /* Will we need to do a second pass in order to compute a
810 revised *pxControl value? */
811 Bool pxStatusMightChange
812 = /* "the user actually set it" */
813 VG_(clo_px_file_backed
) != VexRegUpd_INVALID
814 /* "and they set it to something other than the default. */
815 && *pxControl
!= VG_(clo_px_file_backed
);
817 /* First, compute |bitset|, which specifies which extent(s) need a
818 self check. Whilst we're at it, note any NSegments that we get,
819 so as to reduce the number of calls required to
820 VG_(am_find_nsegment) in a possible second pass. */
821 const NSegment
*segs
[3] = { NULL
, NULL
, NULL
};
823 for (i
= 0; i
< vge
->n_used
; i
++) {
825 Addr addr
= vge
->base
[i
];
826 SizeT len
= vge
->len
[i
];
827 NSegment
const* segA
= NULL
;
829 # if defined(VGO_darwin)
830 // GrP fixme hack - dyld i386 IMPORT gets rewritten.
831 // To really do this correctly, we'd need to flush the
832 // translation cache whenever a segment became +WX.
833 segA
= VG_(am_find_nsegment
)(addr
);
834 if (segA
&& segA
->hasX
&& segA
->hasW
)
839 switch (VG_(clo_smc_check
)) {
841 /* never check (except as per Darwin hack above) */
848 /* check if the address is in the same segment as this
849 thread's stack pointer */
850 Addr sp
= VG_(get_SP
)(closure
->tid
);
852 segA
= VG_(am_find_nsegment
)(addr
);
854 NSegment
const* segSP
= VG_(am_find_nsegment
)(sp
);
855 if (segA
&& segSP
&& segA
== segSP
)
859 case Vg_SmcAllNonFile
: {
860 /* check if any part of the extent is not in a
861 file-mapped segment */
863 segA
= VG_(am_find_nsegment
)(addr
);
865 if (segA
&& segA
->kind
== SkFileC
&& segA
->start
<= addr
866 && (len
== 0 || addr
+ len
<= segA
->end
+ 1)) {
867 /* in a file-mapped segment; skip the check */
881 if (pxStatusMightChange
&& segA
) {
882 vg_assert(i
< sizeof(segs
)/sizeof(segs
[0]));
887 /* Now, possibly do a second pass, to see if the PX status might
888 change. This can happen if the user specified value via
889 --px-file-backed= which is different from the default PX value
890 specified via --vex-iropt-register-updates (also known by the
891 shorter alias --px-default). */
892 if (pxStatusMightChange
) {
894 Bool allFileBacked
= True
;
895 for (i
= 0; i
< vge
->n_used
; i
++) {
896 Addr addr
= vge
->base
[i
];
897 SizeT len
= vge
->len
[i
];
898 NSegment
const* segA
= segs
[i
];
900 /* If we don't have a cached value for |segA|, compute it now. */
901 segA
= VG_(am_find_nsegment
)(addr
);
903 vg_assert(segA
); /* Can this ever fail? */
904 if (segA
&& segA
->kind
== SkFileC
&& segA
->start
<= addr
905 && (len
== 0 || addr
+ len
<= segA
->end
+ 1)) {
906 /* in a file-mapped segment */
908 /* not in a file-mapped segment, or we can't figure out
910 allFileBacked
= False
;
915 /* So, finally, if all the extents are in file backed segments, perform
916 the user-specified PX change. */
918 *pxControl
= VG_(clo_px_file_backed
);
923 /* Update running PX stats, as it is difficult without these to
924 check that the system is behaving as expected. */
925 switch (*pxControl
) {
926 case VexRegUpdSpAtMemAccess
:
927 n_PX_VexRegUpdSpAtMemAccess
++; break;
928 case VexRegUpdUnwindregsAtMemAccess
:
929 n_PX_VexRegUpdUnwindregsAtMemAccess
++; break;
930 case VexRegUpdAllregsAtMemAccess
:
931 n_PX_VexRegUpdAllregsAtMemAccess
++; break;
932 case VexRegUpdAllregsAtEachInsn
:
933 n_PX_VexRegUpdAllregsAtEachInsn
++; break;
942 /* This is a callback passed to LibVEX_Translate. It stops Vex from
943 chasing into function entry points that we wish to redirect.
944 Chasing across them obviously defeats the redirect mechanism, with
945 bad effects for Memcheck, Helgrind, DRD, Massif, and possibly others.
947 static Bool
chase_into_ok ( void* closureV
, Addr addr
)
949 NSegment
const* seg
= VG_(am_find_nsegment
)(addr
);
951 /* Work through a list of possibilities why we might not want to
954 /* Destination not in a plausible segment? */
955 if (!translations_allowable_from_seg(seg
, addr
))
958 /* Destination is redirected? */
959 if (addr
!= VG_(redir_do_lookup
)(addr
, NULL
))
962 # if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
963 /* This needs to be at the start of its own block. Don't chase. */
964 if (addr
== (Addr
)&VG_(ppctoc_magic_redirect_return_stub
))
968 /* overly conservative, but .. don't chase into the distinguished
969 address that m_transtab uses as an empty-slot marker for
971 if (addr
== TRANSTAB_BOGUS_GUEST_ADDR
)
974 # if defined(VGA_s390x)
975 /* Never chase into an EX instruction. Generating IR for EX causes
976 a round-trip through the scheduler including VG_(discard_translations).
977 And that's expensive as shown by perf/tinycc.c:
978 Chasing into EX increases the number of EX translations from 21 to
979 102666 causing a 7x runtime increase for "none" and a 3.2x runtime
980 increase for memcheck. */
981 if (((UChar
*)addr
)[0] == 0x44 || /* EX */
982 ((UChar
*)addr
)[0] == 0xC6) /* EXRL */
986 /* well, ok then. go on and chase. */
993 if (0) VG_(printf
)("not chasing into 0x%lx\n", addr
);
998 /* --------------- helpers for with-TOC platforms --------------- */
1000 /* NOTE: with-TOC platforms are: ppc64-linux. */
1002 static IRExpr
* mkU64 ( ULong n
) {
1003 return IRExpr_Const(IRConst_U64(n
));
1005 static IRExpr
* mkU32 ( UInt n
) {
1006 return IRExpr_Const(IRConst_U32(n
));
1009 #if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
1010 static IRExpr
* mkU8 ( UChar n
) {
1011 return IRExpr_Const(IRConst_U8(n
));
1013 static IRExpr
* narrowTo32 ( IRTypeEnv
* tyenv
, IRExpr
* e
) {
1014 if (typeOfIRExpr(tyenv
, e
) == Ity_I32
) {
1017 vg_assert(typeOfIRExpr(tyenv
, e
) == Ity_I64
);
1018 return IRExpr_Unop(Iop_64to32
, e
);
1022 /* Generate code to push word-typed expression 'e' onto this thread's
1023 redir stack, checking for stack overflow and generating code to
1026 static void gen_PUSH ( IRSB
* bb
, IRExpr
* e
)
1032 # if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
1033 Int stack_size
= VEX_GUEST_PPC64_REDIR_STACK_SIZE
;
1034 Int offB_REDIR_SP
= offsetof(VexGuestPPC64State
,guest_REDIR_SP
);
1035 Int offB_REDIR_STACK
= offsetof(VexGuestPPC64State
,guest_REDIR_STACK
);
1036 Int offB_EMNOTE
= offsetof(VexGuestPPC64State
,guest_EMNOTE
);
1037 Int offB_CIA
= offsetof(VexGuestPPC64State
,guest_CIA
);
1039 IRType ty_Word
= Ity_I64
;
1040 IROp op_CmpNE
= Iop_CmpNE64
;
1041 IROp op_Sar
= Iop_Sar64
;
1042 IROp op_Sub
= Iop_Sub64
;
1043 IROp op_Add
= Iop_Add64
;
1044 IRExpr
*(*mkU
)(ULong
) = mkU64
;
1045 vg_assert(VG_WORDSIZE
== 8);
1047 Int stack_size
= VEX_GUEST_PPC32_REDIR_STACK_SIZE
;
1048 Int offB_REDIR_SP
= offsetof(VexGuestPPC32State
,guest_REDIR_SP
);
1049 Int offB_REDIR_STACK
= offsetof(VexGuestPPC32State
,guest_REDIR_STACK
);
1050 Int offB_EMNOTE
= offsetof(VexGuestPPC32State
,guest_EMNOTE
);
1051 Int offB_CIA
= offsetof(VexGuestPPC32State
,guest_CIA
);
1053 IRType ty_Word
= Ity_I32
;
1054 IROp op_CmpNE
= Iop_CmpNE32
;
1055 IROp op_Sar
= Iop_Sar32
;
1056 IROp op_Sub
= Iop_Sub32
;
1057 IROp op_Add
= Iop_Add32
;
1058 IRExpr
*(*mkU
)(UInt
) = mkU32
;
1059 vg_assert(VG_WORDSIZE
== 4);
1062 vg_assert(sizeof(void*) == VG_WORDSIZE
);
1063 vg_assert(sizeof(Word
) == VG_WORDSIZE
);
1064 vg_assert(sizeof(Addr
) == VG_WORDSIZE
);
1066 descr
= mkIRRegArray( offB_REDIR_STACK
, ty_Word
, stack_size
);
1067 t1
= newIRTemp( bb
->tyenv
, ty_Word
);
1070 vg_assert(typeOfIRExpr(bb
->tyenv
, e
) == ty_Word
);
1072 /* t1 = guest_REDIR_SP + 1 */
1077 IRExpr_Binop(op_Add
, IRExpr_Get( offB_REDIR_SP
, ty_Word
), one
)
1081 /* Bomb out if t1 >=s stack_size, that is, (stack_size-1)-t1 <s 0.
1082 The destination (0) is a bit bogus but it doesn't matter since
1083 this is an unrecoverable error and will lead to Valgrind
1084 shutting down. _EMNOTE is set regardless - that's harmless
1085 since is only has a meaning if the exit is taken. */
1088 IRStmt_Put(offB_EMNOTE
, mkU32(EmWarn_PPC64_redir_overflow
))
1097 IRExpr_Binop(op_Sub
,mkU(stack_size
-1),IRExpr_RdTmp(t1
)),
1098 mkU8(8 * VG_WORDSIZE
- 1)
1103 is64
? IRConst_U64(0) : IRConst_U32(0),
1108 /* guest_REDIR_SP = t1 */
1109 addStmtToIRSB(bb
, IRStmt_Put(offB_REDIR_SP
, IRExpr_RdTmp(t1
)));
1111 /* guest_REDIR_STACK[t1+0] = e */
1112 /* PutI/GetI have I32-typed indexes regardless of guest word size */
1115 IRStmt_PutI(mkIRPutI(descr
,
1116 narrowTo32(bb
->tyenv
,IRExpr_RdTmp(t1
)), 0, e
)));
1120 /* Generate code to pop a word-sized value from this thread's redir
1121 stack, binding it to a new temporary, which is returned. As with
1122 gen_PUSH, an overflow check is also performed. */
1124 static IRTemp
gen_POP ( IRSB
* bb
)
1126 # if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
1127 Int stack_size
= VEX_GUEST_PPC64_REDIR_STACK_SIZE
;
1128 Int offB_REDIR_SP
= offsetof(VexGuestPPC64State
,guest_REDIR_SP
);
1129 Int offB_REDIR_STACK
= offsetof(VexGuestPPC64State
,guest_REDIR_STACK
);
1130 Int offB_EMNOTE
= offsetof(VexGuestPPC64State
,guest_EMNOTE
);
1131 Int offB_CIA
= offsetof(VexGuestPPC64State
,guest_CIA
);
1133 IRType ty_Word
= Ity_I64
;
1134 IROp op_CmpNE
= Iop_CmpNE64
;
1135 IROp op_Sar
= Iop_Sar64
;
1136 IROp op_Sub
= Iop_Sub64
;
1137 IRExpr
*(*mkU
)(ULong
) = mkU64
;
1139 Int stack_size
= VEX_GUEST_PPC32_REDIR_STACK_SIZE
;
1140 Int offB_REDIR_SP
= offsetof(VexGuestPPC32State
,guest_REDIR_SP
);
1141 Int offB_REDIR_STACK
= offsetof(VexGuestPPC32State
,guest_REDIR_STACK
);
1142 Int offB_EMNOTE
= offsetof(VexGuestPPC32State
,guest_EMNOTE
);
1143 Int offB_CIA
= offsetof(VexGuestPPC32State
,guest_CIA
);
1145 IRType ty_Word
= Ity_I32
;
1146 IROp op_CmpNE
= Iop_CmpNE32
;
1147 IROp op_Sar
= Iop_Sar32
;
1148 IROp op_Sub
= Iop_Sub32
;
1149 IRExpr
*(*mkU
)(UInt
) = mkU32
;
1152 IRRegArray
* descr
= mkIRRegArray( offB_REDIR_STACK
, ty_Word
, stack_size
);
1153 IRTemp t1
= newIRTemp( bb
->tyenv
, ty_Word
);
1154 IRTemp res
= newIRTemp( bb
->tyenv
, ty_Word
);
1155 IRExpr
* one
= mkU(1);
1157 vg_assert(sizeof(void*) == VG_WORDSIZE
);
1158 vg_assert(sizeof(Word
) == VG_WORDSIZE
);
1159 vg_assert(sizeof(Addr
) == VG_WORDSIZE
);
1161 /* t1 = guest_REDIR_SP */
1164 IRStmt_WrTmp( t1
, IRExpr_Get( offB_REDIR_SP
, ty_Word
) )
1167 /* Bomb out if t1 < 0. Same comments as gen_PUSH apply. */
1170 IRStmt_Put(offB_EMNOTE
, mkU32(EmWarn_PPC64_redir_underflow
))
1180 mkU8(8 * VG_WORDSIZE
- 1)
1185 is64
? IRConst_U64(0) : IRConst_U32(0),
1190 /* res = guest_REDIR_STACK[t1+0] */
1191 /* PutI/GetI have I32-typed indexes regardless of guest word size */
1196 IRExpr_GetI(descr
, narrowTo32(bb
->tyenv
,IRExpr_RdTmp(t1
)), 0)
1200 /* guest_REDIR_SP = t1-1 */
1203 IRStmt_Put(offB_REDIR_SP
, IRExpr_Binop(op_Sub
, IRExpr_RdTmp(t1
), one
))
1211 #if defined(VG_PLAT_USES_PPCTOC)
1213 /* Generate code to push LR and R2 onto this thread's redir stack,
1214 then set R2 to the new value (which is the TOC pointer to be used
1215 for the duration of the replacement function, as determined by
1216 m_debuginfo), and set LR to the magic return stub, so we get to
1217 intercept the return and restore R2 and L2 to the values saved
1220 static void gen_push_and_set_LR_R2 ( IRSB
* bb
, Addr new_R2_value
)
1222 # if defined(VGP_ppc64be_linux)
1223 Addr bogus_RA
= (Addr
)&VG_(ppctoc_magic_redirect_return_stub
);
1224 Int offB_GPR2
= offsetof(VexGuestPPC64State
,guest_GPR2
);
1225 Int offB_LR
= offsetof(VexGuestPPC64State
,guest_LR
);
1226 gen_PUSH( bb
, IRExpr_Get(offB_LR
, Ity_I64
) );
1227 gen_PUSH( bb
, IRExpr_Get(offB_GPR2
, Ity_I64
) );
1228 addStmtToIRSB( bb
, IRStmt_Put( offB_LR
, mkU64( bogus_RA
)) );
1229 addStmtToIRSB( bb
, IRStmt_Put( offB_GPR2
, mkU64( new_R2_value
)) );
1232 # error Platform is not TOC-afflicted, fortunately
1237 #if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
1239 static void gen_pop_R2_LR_then_bLR ( IRSB
* bb
)
1241 # if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
1242 Int offB_GPR2
= offsetof(VexGuestPPC64State
,guest_GPR2
);
1243 Int offB_LR
= offsetof(VexGuestPPC64State
,guest_LR
);
1244 Int offB_CIA
= offsetof(VexGuestPPC64State
,guest_CIA
);
1245 IRTemp old_R2
= newIRTemp( bb
->tyenv
, Ity_I64
);
1246 IRTemp old_LR
= newIRTemp( bb
->tyenv
, Ity_I64
);
1248 old_R2
= gen_POP( bb
);
1249 addStmtToIRSB( bb
, IRStmt_Put( offB_GPR2
, IRExpr_RdTmp(old_R2
)) );
1251 old_LR
= gen_POP( bb
);
1252 addStmtToIRSB( bb
, IRStmt_Put( offB_LR
, IRExpr_RdTmp(old_LR
)) );
1254 /* re boring, we arrived here precisely because a wrapped fn did a
1255 blr (hence Ijk_Ret); so we should just mark this jump as Boring,
1256 else one _Call will have resulted in two _Rets. */
1257 bb
->jumpkind
= Ijk_Boring
;
1258 bb
->next
= IRExpr_Binop(Iop_And64
, IRExpr_RdTmp(old_LR
), mkU64(~(3ULL)));
1259 bb
->offsIP
= offB_CIA
;
1261 # error Platform is not TOC-afflicted, fortunately
1266 #if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
1269 Bool
mk_preamble__ppctoc_magic_return_stub ( void* closureV
, IRSB
* bb
)
1271 VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
1272 /* Since we're creating the entire IRSB right here, give it a
1273 proper IMark, as it won't get one any other way, and cachegrind
1274 will barf if it doesn't have one (fair enough really). */
1275 addStmtToIRSB( bb
, IRStmt_IMark( closure
->readdr
, 4, 0 ) );
1276 /* Generate the magic sequence:
1277 pop R2 from hidden stack
1278 pop LR from hidden stack
1281 gen_pop_R2_LR_then_bLR(bb
);
1282 return True
; /* True == this is the entire BB; don't disassemble any
1283 real insns into it - just hand it directly to
1284 optimiser/instrumenter/backend. */
1288 #if defined(VGP_ppc64le_linux)
1289 /* Generate code to push LR and R2 onto this thread's redir stack.
1290 Need to save R2 in case we redirect to a global entry point. The
1291 value of R2 is not preserved when entering the global entry point.
1292 Need to make sure R2 gets restored on return. Set LR to the magic
1293 return stub, so we get to intercept the return and restore R2 and
1294 L2 to the values saved here.
1296 The existing infrastruture for the TOC enabled architectures is
1297 being exploited here. So, we need to enable a number of the
1298 code sections used by VG_PLAT_USES_PPCTOC.
1301 static void gen_push_R2_and_set_LR ( IRSB
* bb
)
1303 Addr bogus_RA
= (Addr
)&VG_(ppctoc_magic_redirect_return_stub
);
1304 Int offB_GPR2
= offsetof(VexGuestPPC64State
,guest_GPR2
);
1305 Int offB_LR
= offsetof(VexGuestPPC64State
,guest_LR
);
1306 gen_PUSH( bb
, IRExpr_Get(offB_LR
, Ity_I64
) );
1307 gen_PUSH( bb
, IRExpr_Get(offB_GPR2
, Ity_I64
) );
1308 addStmtToIRSB( bb
, IRStmt_Put( offB_LR
, mkU64( bogus_RA
)) );
1312 /* --------------- END helpers for with-TOC platforms --------------- */
1315 /* This is the IR preamble generator used for replacement
1316 functions. It adds code to set the guest_NRADDR{_GPR2} to zero
1317 (technically not necessary, but facilitates detecting mixups in
1318 which a replacement function has been erroneously declared using
1319 VG_REPLACE_FUNCTION_Z{U,Z} when instead it should have been written
1320 using VG_WRAP_FUNCTION_Z{U,Z}).
1322 On with-TOC platforms the follow hacks are also done: LR and R2 are
1323 pushed onto a hidden stack, R2 is set to the correct value for the
1324 replacement function, and LR is set to point at the magic
1325 return-stub address. Setting LR causes the return of the
1326 wrapped/redirected function to lead to our magic return stub, which
1327 restores LR and R2 from said stack and returns for real.
1329 VG_(get_StackTrace_wrk) understands that the LR value may point to
1330 the return stub address, and that in that case it can get the real
1331 LR value from the hidden stack instead. */
1333 Bool
mk_preamble__set_NRADDR_to_zero ( void* closureV
, IRSB
* bb
)
1336 = sizeof(((VexGuestArchState
*)0)->guest_NRADDR
);
1337 vg_assert(nraddr_szB
== 4 || nraddr_szB
== 8);
1338 vg_assert(nraddr_szB
== VG_WORDSIZE
);
1342 offsetof(VexGuestArchState
,guest_NRADDR
),
1343 nraddr_szB
== 8 ? mkU64(0) : mkU32(0)
1346 // t9 needs to be set to point to the start of the redirected function.
1347 # if defined(VGP_mips32_linux)
1348 VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
1349 Int offB_GPR25
= offsetof(VexGuestMIPS32State
, guest_r25
);
1350 addStmtToIRSB(bb
, IRStmt_Put(offB_GPR25
, mkU32(closure
->readdr
)));
1352 # if defined(VGP_mips64_linux)
1353 VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
1354 Int offB_GPR25
= offsetof(VexGuestMIPS64State
, guest_r25
);
1355 addStmtToIRSB(bb
, IRStmt_Put(offB_GPR25
, mkU64(closure
->readdr
)));
1357 # if defined(VG_PLAT_USES_PPCTOC)
1358 { VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
1362 offsetof(VexGuestArchState
,guest_NRADDR_GPR2
),
1363 VG_WORDSIZE
==8 ? mkU64(0) : mkU32(0)
1366 gen_push_and_set_LR_R2 ( bb
, VG_(get_tocptr
)( VG_(current_DiEpoch
)(),
1367 closure
->readdr
) );
1371 #if defined(VGP_ppc64le_linux)
1372 VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
1373 Int offB_GPR12
= offsetof(VexGuestArchState
, guest_GPR12
);
1374 addStmtToIRSB(bb
, IRStmt_Put(offB_GPR12
, mkU64(closure
->readdr
)));
1377 offsetof(VexGuestArchState
,guest_NRADDR_GPR2
),
1378 VG_WORDSIZE
==8 ? mkU64(0) : mkU32(0)
1381 gen_push_R2_and_set_LR ( bb
);
1386 /* Ditto, except set guest_NRADDR to nraddr (the un-redirected guest
1387 address). This is needed for function wrapping - so the wrapper
1388 can read _NRADDR and find the address of the function being
1389 wrapped. On toc-afflicted platforms we must also snarf r2. */
1391 Bool
mk_preamble__set_NRADDR_to_nraddr ( void* closureV
, IRSB
* bb
)
1393 VgCallbackClosure
* closure
= (VgCallbackClosure
*)closureV
;
1395 = sizeof(((VexGuestArchState
*)0)->guest_NRADDR
);
1396 vg_assert(nraddr_szB
== 4 || nraddr_szB
== 8);
1397 vg_assert(nraddr_szB
== VG_WORDSIZE
);
1401 offsetof(VexGuestArchState
,guest_NRADDR
),
1403 ? IRExpr_Const(IRConst_U64( closure
->nraddr
))
1404 : IRExpr_Const(IRConst_U32( (UInt
)closure
->nraddr
))
1407 // t9 needs to be set to point to the start of the redirected function.
1408 # if defined(VGP_mips32_linux)
1409 Int offB_GPR25
= offsetof(VexGuestMIPS32State
, guest_r25
);
1410 addStmtToIRSB(bb
, IRStmt_Put(offB_GPR25
, mkU32(closure
->readdr
)));
1412 # if defined(VGP_mips64_linux)
1413 Int offB_GPR25
= offsetof(VexGuestMIPS64State
, guest_r25
);
1414 addStmtToIRSB(bb
, IRStmt_Put(offB_GPR25
, mkU64(closure
->readdr
)));
1416 # if defined(VG_PLAT_USES_PPCTOC)
1420 offsetof(VexGuestArchState
,guest_NRADDR_GPR2
),
1421 IRExpr_Get(offsetof(VexGuestArchState
,guest_GPR2
),
1422 VG_WORDSIZE
==8 ? Ity_I64
: Ity_I32
)
1425 gen_push_and_set_LR_R2 ( bb
, VG_(get_tocptr
)( VG_(current_DiEpoch
)(),
1426 closure
->readdr
) );
1428 #if defined(VGP_ppc64le_linux)
1429 /* This saves the r2 before leaving the function. We need to move
1430 * guest_NRADDR_GPR2 back to R2 on return.
1432 Int offB_GPR12
= offsetof(VexGuestArchState
, guest_GPR12
);
1436 offsetof(VexGuestArchState
,guest_NRADDR_GPR2
),
1437 IRExpr_Get(offsetof(VexGuestArchState
,guest_GPR2
),
1438 VG_WORDSIZE
==8 ? Ity_I64
: Ity_I32
)
1441 addStmtToIRSB(bb
, IRStmt_Put(offB_GPR12
, mkU64(closure
->readdr
)));
1442 gen_push_R2_and_set_LR ( bb
);
1447 /* --- Helpers to do with PPC related stack redzones. --- */
1449 __attribute__((unused
))
1450 static Bool
const_True ( Addr guest_addr
)
1455 /* --------------- main translation function --------------- */
1457 /* Note: see comments at top of m_redir.c for the Big Picture on how
1458 redirections are managed. */
1462 /* normal translation, redir neither requested nor inhibited */
1464 /* redir translation, function-wrap (set _NRADDR) style */
1466 /* redir translation, replacement (don't set _NRADDR) style */
1468 /* a translation in which redir is specifically disallowed */
1473 /* Translate the basic block beginning at NRADDR, and add it to the
1474 translation cache & translation table. Unless
1475 DEBUGGING_TRANSLATION is true, in which case the call is being done
1476 for debugging purposes, so (a) throw away the translation once it
1477 is made, and (b) produce a load of debugging output. If
1478 ALLOW_REDIRECTION is False, do not attempt redirection of NRADDR,
1479 and also, put the resulting translation into the no-redirect tt/tc
1480 instead of the normal one.
1482 TID is the identity of the thread requesting this translation.
1485 Bool
VG_(translate
) ( ThreadId tid
,
1487 Bool debugging_translation
,
1488 Int debugging_verbosity
,
1490 Bool allow_redirection
)
1494 Int tmpbuf_used
, verbosity
, i
;
1495 Bool (*preamble_fn
)(void*,IRSB
*);
1497 VexArchInfo vex_archinfo
;
1498 VexAbiInfo vex_abiinfo
;
1499 VexGuestExtents vge
;
1500 VexTranslateArgs vta
;
1501 VexTranslateResult tres
;
1502 VgCallbackClosure closure
;
1504 /* Make sure Vex is initialised right. */
1506 static Bool vex_init_done
= False
;
1508 if (!vex_init_done
) {
1509 LibVEX_Init ( &failure_exit
, &log_bytes
,
1510 1, /* debug_paranoia */
1511 &VG_(clo_vex_control
) );
1512 vex_init_done
= True
;
1515 /* Establish the translation kind and actual guest address to
1516 start from. Sets (addr,kind). */
1517 if (allow_redirection
) {
1519 Addr tmp
= VG_(redir_do_lookup
)( nraddr
, &isWrap
);
1520 if (tmp
== nraddr
) {
1521 /* no redirection found */
1525 /* found a redirect */
1527 kind
= isWrap
? T_Redir_Wrap
: T_Redir_Replace
;
1534 /* Established: (nraddr, addr, kind) */
1536 /* Printing redirection info. */
1538 if ((kind
== T_Redir_Wrap
|| kind
== T_Redir_Replace
)
1539 && (VG_(clo_verbosity
) >= 2 || VG_(clo_trace_redir
))) {
1543 const DiEpoch ep
= VG_(current_DiEpoch
)();
1545 /* Try also to get the soname (not the filename) of the "from"
1546 object. This makes it much easier to debug redirection
1548 const HChar
* nraddr_soname
= "???";
1549 DebugInfo
* nraddr_di
= VG_(find_DebugInfo
)(ep
, nraddr
);
1551 const HChar
* t
= VG_(DebugInfo_get_soname
)(nraddr_di
);
1556 ok
= VG_(get_fnname_w_offset
)(ep
, nraddr
, &buf
);
1557 if (!ok
) buf
= "???";
1559 HChar name1
[VG_(strlen
)(buf
) + 1];
1560 VG_(strcpy
)(name1
, buf
);
1561 ok
= VG_(get_fnname_w_offset
)(ep
, addr
, &name2
);
1562 if (!ok
) name2
= "???";
1564 VG_(message
)(Vg_DebugMsg
,
1565 "REDIR: 0x%lx (%s:%s) redirected to 0x%lx (%s)\n",
1566 nraddr
, nraddr_soname
, name1
,
1570 if (!debugging_translation
)
1571 VG_TRACK( pre_mem_read
, Vg_CoreTranslate
,
1572 tid
, "(translator)", addr
, 1 );
1574 /* If doing any code printing, print a basic block start marker */
1575 if (VG_(clo_trace_flags
) || debugging_translation
) {
1576 const HChar
* objname
= "UNKNOWN_OBJECT";
1578 const DiEpoch ep
= VG_(current_DiEpoch
)();
1579 DebugInfo
* di
= VG_(find_DebugInfo
)( ep
, addr
);
1581 objname
= VG_(DebugInfo_get_filename
)(di
);
1582 objoff
= addr
- VG_(DebugInfo_get_text_bias
)(di
);
1586 const HChar
*fnname
;
1587 Bool ok
= VG_(get_fnname_w_offset
)(ep
, addr
, &fnname
);
1588 if (!ok
) fnname
= "UNKNOWN_FUNCTION";
1590 "==== SB %u (evchecks %llu) [tid %u] 0x%lx %s %s%c0x%lx\n",
1591 VG_(get_bbs_translated
)(), bbs_done
, tid
, addr
,
1592 fnname
, objname
, objoff
>= 0 ? '+' : '-',
1593 (UWord
)(objoff
>= 0 ? objoff
: -objoff
)
1597 /* Are we allowed to translate here? */
1599 { /* BEGIN new scope specially for 'seg' */
1600 NSegment
const* seg
= VG_(am_find_nsegment
)(addr
);
1602 if ( (!translations_allowable_from_seg(seg
, addr
))
1603 || addr
== TRANSTAB_BOGUS_GUEST_ADDR
) {
1604 if (VG_(clo_trace_signals
))
1605 VG_(message
)(Vg_DebugMsg
, "translations not allowed here (0x%lx)"
1606 " - throwing SEGV\n", addr
);
1607 /* U R busted, sonny. Place your hands on your head and step
1608 away from the orig_addr. */
1609 /* Code address is bad - deliver a signal instead */
1611 /* There's some kind of segment at the requested place, but we
1612 aren't allowed to execute code here. */
1613 if (debugging_translation
)
1614 VG_(printf
)("translations not allowed here (segment not executable)"
1617 VG_(synth_fault_perms
)(tid
, addr
);
1619 /* There is no segment at all; we are attempting to execute in
1620 the middle of nowhere. */
1621 if (debugging_translation
)
1622 VG_(printf
)("translations not allowed here (no segment)"
1625 VG_(synth_fault_mapping
)(tid
, addr
);
1630 /* True if a debug trans., or if bit N set in VG_(clo_trace_codegen). */
1632 if (debugging_translation
) {
1633 verbosity
= debugging_verbosity
;
1636 if ( (VG_(clo_trace_flags
) > 0
1637 && VG_(get_bbs_translated
)() <= VG_(clo_trace_notabove
)
1638 && VG_(get_bbs_translated
)() >= VG_(clo_trace_notbelow
) )) {
1639 verbosity
= VG_(clo_trace_flags
);
1642 /* Figure out which preamble-mangling callback to send. */
1644 if (kind
== T_Redir_Replace
)
1645 preamble_fn
= mk_preamble__set_NRADDR_to_zero
;
1647 if (kind
== T_Redir_Wrap
)
1648 preamble_fn
= mk_preamble__set_NRADDR_to_nraddr
;
1650 /* LE we setup the LR */
1651 # if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
1652 if (nraddr
== (Addr
)&VG_(ppctoc_magic_redirect_return_stub
)) {
1653 /* If entering the special return stub, this means a wrapped or
1654 redirected function is returning. Make this translation one
1655 which restores R2 and LR from the thread's hidden redir
1656 stack, and branch to the (restored) link register, thereby
1657 really causing the function to return. */
1658 vg_assert(kind
== T_Normal
);
1659 vg_assert(nraddr
== addr
);
1660 preamble_fn
= mk_preamble__ppctoc_magic_return_stub
;
1664 /* ------ Actually do the translation. ------ */
1665 vg_assert2(VG_(tdict
).tool_instrument
,
1666 "you forgot to set VgToolInterface function 'tool_instrument'");
1668 /* Get the CPU info established at startup. */
1669 VG_(machine_get_VexArchInfo
)( &vex_arch
, &vex_archinfo
);
1671 /* Set up 'abiinfo' structure with stuff Vex needs to know about
1672 the guest and host ABIs. */
1674 LibVEX_default_VexAbiInfo( &vex_abiinfo
);
1675 vex_abiinfo
.guest_stack_redzone_size
= VG_STACK_REDZONE_SZB
;
1677 # if defined(VGP_amd64_linux)
1678 vex_abiinfo
.guest_amd64_assume_fs_is_const
= True
;
1679 vex_abiinfo
.guest_amd64_assume_gs_is_const
= True
;
1682 # if defined(VGP_amd64_darwin)
1683 vex_abiinfo
.guest_amd64_assume_gs_is_const
= True
;
1686 # if defined(VGP_amd64_solaris)
1687 vex_abiinfo
.guest_amd64_assume_fs_is_const
= True
;
1690 # if defined(VGP_ppc32_linux)
1691 vex_abiinfo
.guest_ppc_zap_RZ_at_blr
= False
;
1692 vex_abiinfo
.guest_ppc_zap_RZ_at_bl
= NULL
;
1695 # if defined(VGP_ppc64be_linux)
1696 vex_abiinfo
.guest_ppc_zap_RZ_at_blr
= True
;
1697 vex_abiinfo
.guest_ppc_zap_RZ_at_bl
= const_True
;
1698 vex_abiinfo
.host_ppc_calls_use_fndescrs
= True
;
1701 # if defined(VGP_ppc64le_linux)
1702 vex_abiinfo
.guest_ppc_zap_RZ_at_blr
= True
;
1703 vex_abiinfo
.guest_ppc_zap_RZ_at_bl
= const_True
;
1704 vex_abiinfo
.host_ppc_calls_use_fndescrs
= False
;
1707 # if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
1708 ThreadArchState
* arch
= &VG_(threads
)[tid
].arch
;
1709 vex_abiinfo
.guest_mips_fp_mode
=
1710 !!(arch
->vex
.guest_CP0_status
& MIPS_CP0_STATUS_FR
);
1711 # if defined(VGP_mips32_linux)
1712 vex_abiinfo
.guest_mips_fp_mode
|=
1713 (!!(arch
->vex
.guest_CP0_Config5
& MIPS_CONF5_FRE
)) << 1;
1715 /* Compute guest__use_fallback_LLSC, overiding any settings of
1716 VG_(clo_fallback_llsc) that we know would cause the guest to
1718 if (VEX_MIPS_COMP_ID(vex_archinfo
.hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
1719 /* We must use the fallback scheme. */
1720 vex_abiinfo
.guest__use_fallback_LLSC
= True
;
1722 vex_abiinfo
.guest__use_fallback_LLSC
1723 = SimHintiS(SimHint_fallback_llsc
, VG_(clo_sim_hints
));
1727 # if defined(VGP_arm64_linux)
1728 vex_abiinfo
.guest__use_fallback_LLSC
1729 = /* The user asked explicitly */
1730 SimHintiS(SimHint_fallback_llsc
, VG_(clo_sim_hints
))
1731 || /* we autodetected that it is necessary */
1732 vex_archinfo
.arm64_requires_fallback_LLSC
;
1735 /* Set up closure args. */
1737 closure
.nraddr
= nraddr
;
1738 closure
.readdr
= addr
;
1740 /* Set up args for LibVEX_Translate. */
1741 vta
.arch_guest
= vex_arch
;
1742 vta
.archinfo_guest
= vex_archinfo
;
1743 vta
.arch_host
= vex_arch
;
1744 vta
.archinfo_host
= vex_archinfo
;
1745 vta
.abiinfo_both
= vex_abiinfo
;
1746 vta
.callback_opaque
= (void*)&closure
;
1747 vta
.guest_bytes
= (UChar
*)addr
;
1748 vta
.guest_bytes_addr
= addr
;
1749 vta
.chase_into_ok
= chase_into_ok
;
1750 vta
.guest_extents
= &vge
;
1751 vta
.host_bytes
= tmpbuf
;
1752 vta
.host_bytes_size
= N_TMPBUF
;
1753 vta
.host_bytes_used
= &tmpbuf_used
;
1754 { /* At this point we have to reconcile Vex's view of the
1755 instrumentation callback - which takes a void* first argument
1756 - with Valgrind's view, in which the first arg is a
1757 VgCallbackClosure*. Hence the following longwinded casts.
1758 They are entirely legal but longwinded so as to maximise the
1759 chance of the C typechecker picking up any type snafus. */
1760 IRSB
*(*f
)(VgCallbackClosure
*,
1761 IRSB
*,const VexGuestLayout
*,const VexGuestExtents
*,
1762 const VexArchInfo
*,IRType
,IRType
)
1763 = VG_(clo_vgdb
) != Vg_VgdbNo
1764 ? tool_instrument_then_gdbserver_if_needed
1765 : VG_(tdict
).tool_instrument
;
1767 IRSB
*,const VexGuestLayout
*,const VexGuestExtents
*,
1768 const VexArchInfo
*,IRType
,IRType
) = (__typeof__(g
)) f
;
1769 vta
.instrument1
= g
;
1771 /* No need for type kludgery here. */
1772 vta
.instrument2
= need_to_handle_SP_assignment()
1775 vta
.finaltidy
= VG_(needs
).final_IR_tidy_pass
1776 ? VG_(tdict
).tool_final_IR_tidy_pass
1778 vta
.needs_self_check
= needs_self_check
;
1779 vta
.preamble_function
= preamble_fn
;
1780 vta
.traceflags
= verbosity
;
1781 vta
.sigill_diag
= VG_(clo_sigill_diag
);
1782 vta
.addProfInc
= VG_(clo_profyle_sbs
) && kind
!= T_NoRedir
;
1784 /* Set up the dispatch continuation-point info. If this is a
1785 no-redir translation then it cannot be chained, and the chain-me
1786 points are set to NULL to indicate that. The indir point must
1787 also be NULL, since we can't allow this translation to do an
1788 indir transfer -- that would take it back into the main
1789 translation cache too.
1791 All this is because no-redir translations live outside the main
1792 translation cache (in a secondary one) and chaining them would
1793 involve more adminstrative complexity that isn't worth the
1794 hassle, because we don't expect them to get used often. So
1796 if (allow_redirection
) {
1797 vta
.disp_cp_chain_me_to_slowEP
1798 = VG_(fnptr_to_fnentry
)( &VG_(disp_cp_chain_me_to_slowEP
) );
1799 vta
.disp_cp_chain_me_to_fastEP
1800 = VG_(fnptr_to_fnentry
)( &VG_(disp_cp_chain_me_to_fastEP
) );
1802 = VG_(fnptr_to_fnentry
)( &VG_(disp_cp_xindir
) );
1804 vta
.disp_cp_chain_me_to_slowEP
= NULL
;
1805 vta
.disp_cp_chain_me_to_fastEP
= NULL
;
1806 vta
.disp_cp_xindir
= NULL
;
1808 /* This doesn't involve chaining and so is always allowable. */
1809 vta
.disp_cp_xassisted
1810 = VG_(fnptr_to_fnentry
)( &VG_(disp_cp_xassisted
) );
1812 /* Sheesh. Finally, actually _do_ the translation! */
1813 tres
= LibVEX_Translate ( &vta
);
1815 vg_assert(tres
.status
== VexTransOK
);
1816 vg_assert(tres
.n_sc_extents
>= 0 && tres
.n_sc_extents
<= 3);
1817 vg_assert(tmpbuf_used
<= N_TMPBUF
);
1818 vg_assert(tmpbuf_used
> 0);
1819 } /* END new scope specially for 'seg' */
1821 /* Tell aspacem of all segments that have had translations taken
1823 for (i
= 0; i
< vge
.n_used
; i
++) {
1824 VG_(am_set_segment_hasT
)( vge
.base
[i
] );
1827 /* Copy data at trans_addr into the translation cache. */
1828 vg_assert(tmpbuf_used
> 0 && tmpbuf_used
< 65536);
1830 // If debugging, don't do anything with the translated block; we
1831 // only did this for the debugging output produced along the way.
1832 if (!debugging_translation
) {
1834 if (kind
!= T_NoRedir
) {
1835 // Put it into the normal TT/TC structures. This is the
1838 // Note that we use nraddr (the non-redirected address), not
1839 // addr, which might have been changed by the redirection
1840 VG_(add_to_transtab
)( &vge
,
1844 tres
.n_sc_extents
> 0,
1846 tres
.n_guest_instrs
);
1848 vg_assert(tres
.offs_profInc
== -1); /* -1 == unset */
1849 VG_(add_to_unredir_transtab
)( &vge
,
1859 /*--------------------------------------------------------------------*/
1861 /*--------------------------------------------------------------------*/