/*--------------------------------------------------------------------*/
/*--- The core dispatch loop, for jumping to a code address.       ---*/
/*---                                        dispatch-x86-darwin.S ---*/
/*--------------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2017 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_core_basics_asm.h"

#if defined(VGP_x86_darwin)

#include "pub_core_dispatch_asm.h"
#include "pub_core_transtab_asm.h"
#include "libvex_guest_offsets.h"   /* for OFFSET_x86_EIP */
/*------------------------------------------------------------*/
/*--- The dispatch loop.  VG_(disp_run_translations) is    ---*/
/*--- used to run all translations,                        ---*/
/*--- including no-redir ones.                             ---*/
/*------------------------------------------------------------*/

/*----------------------------------------------------*/
/*--- Entry and preamble (set everything up)       ---*/
/*----------------------------------------------------*/
/* signature:
void VG_(disp_run_translations)( UWord* two_words,
                                 void*  guest_state,
                                 Addr   host_addr );
*/
.globl VG_(disp_run_translations)
VG_(disp_run_translations):
        /* 0(%esp) holds our return address. */
        /* 4(%esp) holds two_words */
        /* 8(%esp) holds guest_state */
        /* 12(%esp) holds host_addr */
        /* Save integer registers, since this is a pseudo-function. */
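        /* (Reconstructed save sequence: the 28+N(%esp) offsets used
           below imply seven words pushed here.) */
        pushl   %eax
        pushl   %ebx
        pushl   %ecx
        pushl   %edx
        pushl   %esi
        pushl   %edi
        pushl   %ebp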
        /* 28+4(%esp) holds two_words */
        /* 28+8(%esp) holds guest_state */
        /* 28+12(%esp) holds host_addr */

        /* Get the host CPU in the state expected by generated code. */
        /* set host FPU control word to the default mode expected
           by VEX-generated code.  See comments in libvex.h for more
           info. */
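        /* (Sketch: install the default control word, 0x027F, which the
           postamble's invariant check below expects to find intact.) */
        finit
        pushl   $0x027F
        fldcw   (%esp)
        addl    $4, %esp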
        /* set host SSE control word to the default mode expected
           by VEX-generated code. */
        cmpl    $0, VG_(machine_x86_have_mxcsr)
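        /* (Sketch: skip if the host has no %mxcsr; otherwise install
           the default value, 0x1F80, checked again in the postamble.) */
        jz      L1
        pushl   $0x1F80
        ldmxcsr (%esp)
        addl    $4, %esp
L1: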
        /* set dir flag to known value */
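        cld     /* i.e. %eflags.DF = 0 */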
        /* Set up the guest state pointer */
        movl    28+8(%esp), %ebp
        /* and jump into the code cache.  Chained translations in
           the code cache run until, for whatever reason, they can't
           continue.  When that happens, the translation in question
           will jump (or call) to one of the continuation points
           VG_(cp_...) below. */
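        /* (Reconstructed: host_addr is still at 28+12(%esp), per the
           layout comments above.) */
        jmpl    *28+12(%esp)
        /*NOTREACHED*/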
/*----------------------------------------------------*/
/*--- Postamble and exit.                          ---*/
/*----------------------------------------------------*/
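/* (Label assumed here: the continuation points below all rejoin via
   "jmp postamble".) */
postamble: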
        /* At this point, %eax and %edx contain two
           words to be returned to the caller.  %eax
           holds a TRC value, and %edx optionally may
           hold another word (for CHAIN_ME exits, the
           address of the place to patch.) */
        /* We're leaving.  Check that nobody messed with %mxcsr
           or %fpucw.  We can't mess with %eax or %edx here as they
           hold the tentative return values, but any others are OK. */
#if !defined(ENABLE_INNER)
        /* This check fails for self-hosting, so skip in that case */
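        /* (Sketch: read back the FPU control word and compare it with
           the default installed in the preamble.) */
        pushl   $0
        fstcw   (%esp)
        cmpl    $0x027F, (%esp)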
        popl    %esi /* get rid of the word without trashing %eflags */
        jnz     invariant_violation
#endif
#       cmpl    $0, VG_(machine_x86_have_mxcsr)
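        /* (Reconstructed %mxcsr read-back; note the availability test
           above is commented out in the source.) */
        jz      L2
        pushl   $0
        stmxcsr (%esp)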
        andl    $0xFFFFFFC0, (%esp)  /* mask out status flags */
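        cmpl    $0x1F80, (%esp)      /* compare with the preamble's default */
        popl    %esi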
        jnz     invariant_violation
L2:     /* otherwise we're OK */
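        /* (Reconstructed fall-through past the failure path.) */
        jmp     remove_frame

invariant_violation: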
        movl    $VG_TRC_INVARIANT_FAILED, %eax
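        movl    $0, %edx             /* no second return word on this path */

remove_frame: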
        /* Stash return values */
        movl    28+4(%esp), %edi     /* two_words */
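        /* (Sketch: hand the (%eax, %edx) pair back via two_words.) */
        movl    %eax, 0(%edi)
        movl    %edx, 4(%edi)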
        /* Restore int regs and return. */
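        /* (Reconstructed: pop in reverse order of the preamble's
           pushes.) */
        popl    %ebp
        popl    %edi
        popl    %esi
        popl    %edx
        popl    %ecx
        popl    %ebx
        popl    %eax
        ret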
/*----------------------------------------------------*/
/*--- Continuation points                          ---*/
/*----------------------------------------------------*/

/* ------ Chain me to slow entry point ------ */
.globl VG_(disp_cp_chain_me_to_slowEP)
VG_(disp_cp_chain_me_to_slowEP):
        /* We got called.  The return address indicates
           where the patching needs to happen.  Collect
           the return address and exit back to C land,
           handing the caller the pair (Chain_me_S, RA). */
        movl    $VG_TRC_CHAIN_ME_TO_SLOW_EP, %eax
        popl    %edx
        /* 5 = movl $VG_(disp_chain_me_to_slowEP), %edx;
           2 = call *%edx */
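        /* (Sketch: step %edx back over the 5+2 byte call sequence noted
           above, so it points at the patch site, then exit.) */
        subl    $5+2, %edx
        jmp     postamble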
/* ------ Chain me to fast entry point ------ */
.globl VG_(disp_cp_chain_me_to_fastEP)
VG_(disp_cp_chain_me_to_fastEP):
        /* We got called.  The return address indicates
           where the patching needs to happen.  Collect
           the return address and exit back to C land,
           handing the caller the pair (Chain_me_F, RA). */
        movl    $VG_TRC_CHAIN_ME_TO_FAST_EP, %eax
        popl    %edx
        /* 5 = movl $VG_(disp_chain_me_to_fastEP), %edx;
           2 = call *%edx */
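        /* (Same patch-site computation as the slowEP case above.) */
        subl    $5+2, %edx
        jmp     postamble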
/* ------ Indirect but boring jump ------ */
.globl VG_(disp_cp_xindir)
VG_(disp_cp_xindir):
        /* Where are we going? */
        movl    OFFSET_x86_EIP(%ebp), %eax    // "guest"

        /* stats only */
        addl    $1, VG_(stats__n_xIndirs_32)
        // LIVE: %ebp (guest state ptr), %eax (guest address to go to).
        // We use 4 temporaries:
        //   %esi (to point at the relevant FastCacheSet),
        //   %ebx, %ecx and %edx (scratch).

        /* Try a fast lookup in the translation cache.  This is pretty much
           a handcoded version of VG_(lookupInFastCache). */

        // Compute %esi = VG_TT_FAST_HASH(guest)
        movl    %eax, %esi               // guest
        shrl    $VG_TT_FAST_BITS, %esi   // (guest >> VG_TT_FAST_BITS)
        xorl    %eax, %esi               // (guest >> VG_TT_FAST_BITS) ^ guest
        andl    $VG_TT_FAST_MASK, %esi   // setNo

        // Compute %esi = &VG_(tt_fast)[%esi]
        shll    $VG_FAST_CACHE_SET_BITS, %esi  // setNo * sizeof(FastCacheSet)
        leal    VG_(tt_fast)(%esi), %esi       // &VG_(tt_fast)[setNo]

        // LIVE: %ebp (guest state ptr), %eax (guest addr), %esi (cache set)
        // try way 0
        cmpl    %eax, FCS_g0(%esi)   // cmp against .guest0
        jnz     1f
        // hit at way 0
        jmp     *FCS_h0(%esi)        // goto .host0
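        ud2     /* (reconstructed trap barrier after the indirect jump) */

1:      // try way 1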
        cmpl    %eax, FCS_g1(%esi)   // cmp against .guest1
        jnz     2f
        // hit at way 1; swap upwards
        addl    $1, VG_(stats__n_xIndir_hits1_32)
        movl    FCS_g0(%esi), %ebx   // ebx = old .guest0
        movl    FCS_h0(%esi), %ecx   // ecx = old .host0
        movl    FCS_h1(%esi), %edx   // edx = old .host1
        movl    %eax, FCS_g0(%esi)   // new .guest0 = guest
        movl    %edx, FCS_h0(%esi)   // new .host0 = old .host1
        movl    %ebx, FCS_g1(%esi)   // new .guest1 = old .guest0
        movl    %ecx, FCS_h1(%esi)   // new .host1 = old .host0
        jmp     *%edx                // goto old .host1 a.k.a. new .host0
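        ud2

2:      // try way 2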
        cmpl    %eax, FCS_g2(%esi)   // cmp against .guest2
        jnz     3f
        // hit at way 2; swap upwards
        addl    $1, VG_(stats__n_xIndir_hits2_32)
        movl    FCS_g1(%esi), %ebx
        movl    FCS_h1(%esi), %ecx
        movl    FCS_h2(%esi), %edx
        movl    %eax, FCS_g1(%esi)
        movl    %edx, FCS_h1(%esi)
        movl    %ebx, FCS_g2(%esi)
        movl    %ecx, FCS_h2(%esi)
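        /* (Reconstructed tail, mirroring the way-1 hit path.) */
        jmp     *%edx
        ud2

3:      // try way 3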
        cmpl    %eax, FCS_g3(%esi)   // cmp against .guest3
        jnz     4f
        // hit at way 3; swap upwards
        addl    $1, VG_(stats__n_xIndir_hits3_32)
        movl    FCS_g2(%esi), %ebx
        movl    FCS_h2(%esi), %ecx
        movl    FCS_h3(%esi), %edx
        movl    %eax, FCS_g2(%esi)
        movl    %edx, FCS_h2(%esi)
        movl    %ebx, FCS_g3(%esi)
        movl    %ecx, FCS_h3(%esi)
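        /* (Reconstructed tail, as above.) */
        jmp     *%edx
        ud2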
4:      // fast lookup failed
        addl    $1, VG_(stats__n_xIndir_misses_32)

        movl    $VG_TRC_INNER_FASTMISS, %eax
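        movl    $0, %edx             /* no second return word on this path */
        jmp     postamble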
/* ------ Assisted jump ------ */
.globl VG_(disp_cp_xassisted)
VG_(disp_cp_xassisted):
        /* %ebp contains the TRC */
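        /* (Sketch: return the TRC left in %ebp by the translation.) */
        movl    %ebp, %eax
        movl    $0, %edx
        jmp     postamble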
/* ------ Event check failed ------ */
.globl VG_(disp_cp_evcheck_fail)
VG_(disp_cp_evcheck_fail):
        movl    $VG_TRC_INNER_COUNTERZERO, %eax
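        movl    $0, %edx
        jmp     postamble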
#endif // defined(VGP_x86_darwin)

/* Let the linker know we don't need an executable stack */
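/* (Assumed: current Valgrind sources use the MARK_STACK_NO_EXEC macro
   from pub_core_basics_asm.h for this; on Mach-O it expands to
   nothing.) */
MARK_STACK_NO_EXEC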
/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/