/*--------------------------------------------------------------------*/
/*--- The core dispatch loop, for jumping to a code address.       ---*/
/*---                                       dispatch-x86-solaris.S ---*/
/*--------------------------------------------------------------------*/
/*
  This file is part of Valgrind, a dynamic binary instrumentation
  framework.

  Copyright (C) 2012-2017 Petr Pavlu

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, see <http://www.gnu.org/licenses/>.

  The GNU General Public License is contained in the file COPYING.
*/

#include "pub_core_basics_asm.h"

#if defined(VGP_x86_solaris)

#include "pub_core_dispatch_asm.h"
#include "pub_core_transtab_asm.h"
#include "libvex_guest_offsets.h"       /* for OFFSET_x86_EIP */

/*------------------------------------------------------------*/
/*--- The dispatch loop.  VG_(disp_run_translations) is    ---*/
/*--- used to run all translations,                        ---*/
/*--- including no-redir ones.                             ---*/
/*------------------------------------------------------------*/

/*----------------------------------------------------*/
/*--- Entry and preamble (set everything up)       ---*/
/*----------------------------------------------------*/

/* signature:
void VG_(disp_run_translations)( UWord* two_words,
                                 void*  guest_state,
                                 Addr   host_addr );
*/

.globl VG_(disp_run_translations)
.type  VG_(disp_run_translations), @function
VG_(disp_run_translations):
        /* 0(%esp) holds our return address. */
        /* 4(%esp) holds two_words */
        /* 8(%esp) holds guest_state */
        /* 12(%esp) holds host_addr */

        /* Save integer registers, since this is a pseudo-function. */
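        /* (Sketch, not verbatim: seven 4-byte pushes, which is what
           produces the 28-byte offsets used below.) */
        pushl   %eax
        pushl   %ebx
        pushl   %ecx
        pushl   %edx
        pushl   %esi
        pushl   %edi
        pushl   %ebp
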
        /* 28+4(%esp) holds two_words */
        /* 28+8(%esp) holds guest_state */
        /* 28+12(%esp) holds host_addr */

        /* Get the host CPU in the state expected by generated code. */

        /* set host FPU control word to the default mode expected
           by VEX-generated code.  See comments in libvex.h for
           more info. */
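        /* (Sketch: load the assumed default control word 0x027F, the
           same value the postamble's invariant check tests for.) */
        finit
        pushl   $0x027F
        fldcw   (%esp)
        addl    $4, %esp
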
        /* set host SSE control word to the default mode expected
           by VEX-generated code. */
        cmpl    $0, VG_(machine_x86_have_mxcsr)
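        /* (Sketch: skip if SSE is absent, else load the assumed
           default MXCSR value 0x1F80.) */
        jz      L1
        pushl   $0x1F80
        ldmxcsr (%esp)
        addl    $4, %esp
L1:
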
        /* set dir flag to known value */
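        cld     /* (sketch: clear the direction flag) */
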
        /* Set up the guest state pointer */
        movl    28+8(%esp), %ebp

        /* and jump into the code cache.  Chained translations in
           the code cache run until, for whatever reason, they can't
           continue.  When that happens, the translation in question
           will jump (or call) to one of the continuation points
           VG_(cp_...) below. */
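        /* (Sketch: host_addr lives at 28+12(%esp), per the layout
           noted above.) */
        jmp     *28+12(%esp)
        /*NOTREACHED*/
        ud2
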
/*----------------------------------------------------*/
/*--- Postamble and exit.                          ---*/
/*----------------------------------------------------*/
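/* (Label assumed: the continuation points below jump back here.) */
postamble: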
        /* At this point, %eax and %edx contain two
           words to be returned to the caller.  %eax
           holds a TRC value, and %edx optionally may
           hold another word (for CHAIN_ME exits, the
           address of the place to patch.) */

        /* We're leaving.  Check that nobody messed with %mxcsr
           or %fpucw.  We can't mess with %eax or %edx here as they
           hold the tentative return values, but any others are OK. */
#if !defined(ENABLE_INNER)
        /* This check fails for self-hosting, so skip in that case */
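        /* (Sketch: store the FPU control word and compare it against
           the default assumed to have been set in the preamble.) */
        pushl   $0
        fstcw   (%esp)
        cmpl    $0x027F, (%esp)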
        popl    %esi /* get rid of the word without trashing %eflags */
        jnz     invariant_violation
#endif

        cmpl    $0, VG_(machine_x86_have_mxcsr)
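        /* (Sketch: mirror of the preamble's guarded MXCSR load.) */
        jz      L2
        pushl   $0
        stmxcsr (%esp)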
        andl    $0xFFFFFFC0, (%esp)  /* mask out status flags */
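        cmpl    $0x1F80, (%esp)  /* (sketch: assumed default value) */
        popl    %esi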
        jnz     invariant_violation
L2:     /* otherwise we're OK */
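        jmp     remove_frame

/* (Label names here are assumed, for coherence with the jumps above.) */
invariant_violation: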
        movl    $VG_TRC_INVARIANT_FAILED, %eax
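        movl    $0, %edx

remove_frame: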
        /* Stash return values */
        movl    28+4(%esp), %edi        /* two_words */
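        /* (Sketch: write the (TRC, payload) pair into two_words.) */
        movl    %eax, 0(%edi)
        movl    %edx, 4(%edi)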
        /* Restore int regs and return. */
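        /* (Sketch: pops mirror the pushes in the preamble sketch.) */
        popl    %ebp
        popl    %edi
        popl    %esi
        popl    %edx
        popl    %ecx
        popl    %ebx
        popl    %eax
        ret
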
/*----------------------------------------------------*/
/*--- Continuation points                          ---*/
/*----------------------------------------------------*/

/* ------ Chain me to slow entry point ------ */
.global VG_(disp_cp_chain_me_to_slowEP)
VG_(disp_cp_chain_me_to_slowEP):
        /* We got called.  The return address indicates
           where the patching needs to happen.  Collect
           the return address and exit back to C land,
           handing the caller the pair (Chain_me_S, RA) */
        movl    $VG_TRC_CHAIN_ME_TO_SLOW_EP, %eax
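        popl    %edx    /* (sketch: collect the return address) */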
        /* 5 = movl $VG_(disp_chain_me_to_slowEP), %edx;
           2 = call *%edx */
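        /* (Sketch: back the RA up over the 7-byte call sequence noted
           above, so %edx points at the patch site.) */
        subl    $5+2, %edx
        jmp     postamble
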
/* ------ Chain me to fast entry point ------ */
.global VG_(disp_cp_chain_me_to_fastEP)
VG_(disp_cp_chain_me_to_fastEP):
        /* We got called.  The return address indicates
           where the patching needs to happen.  Collect
           the return address and exit back to C land,
           handing the caller the pair (Chain_me_F, RA) */
        movl    $VG_TRC_CHAIN_ME_TO_FAST_EP, %eax
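        popl    %edx    /* (sketch: collect the return address) */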
        /* 5 = movl $VG_(disp_chain_me_to_fastEP), %edx;
           2 = call *%edx */
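        /* (Sketch: same 7-byte adjustment as the slowEP case.) */
        subl    $5+2, %edx
        jmp     postamble
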
/* ------ Indirect but boring jump ------ */
.global VG_(disp_cp_xindir)
VG_(disp_cp_xindir):
        /* Where are we going? */
        movl    OFFSET_x86_EIP(%ebp), %eax      // "guest"

        addl    $1, VG_(stats__n_xIndirs_32)

        // LIVE: %ebp (guest state ptr), %eax (guest address to go to).
        // We use 4 temporaries:
        //   %esi (to point at the relevant FastCacheSet),
        //   %ebx, %ecx and %edx (scratch).

        /* Try a fast lookup in the translation cache.  This is pretty
           much a handcoded version of VG_(lookupInFastCache). */

        // Compute %esi = VG_TT_FAST_HASH(guest)
        movl    %eax, %esi              // guest
        shrl    $VG_TT_FAST_BITS, %esi  // (guest >> VG_TT_FAST_BITS)
        xorl    %eax, %esi              // (guest >> VG_TT_FAST_BITS) ^ guest
        andl    $VG_TT_FAST_MASK, %esi  // setNo

        // Compute %esi = &VG_(tt_fast)[%esi]
        shll    $VG_FAST_CACHE_SET_BITS, %esi  // setNo * sizeof(FastCacheSet)
        leal    VG_(tt_fast)(%esi), %esi       // &VG_(tt_fast)[setNo]

        // LIVE: %ebp (guest state ptr), %eax (guest addr), %esi (cache set)
        cmpl    %eax, FCS_g0(%esi)      // cmp against .guest0
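        jnz     1f      // (assumed local label: miss -> try way 1)
        // hit at way 0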
        jmp     *FCS_h0(%esi)           // goto .host0

1:      cmpl    %eax, FCS_g1(%esi)      // cmp against .guest1
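        jnz     2f      // (assumed: miss -> try way 2)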
        // hit at way 1; swap upwards
        addl    $1, VG_(stats__n_xIndir_hits1_32)
        movl    FCS_g0(%esi), %ebx      // ebx = old .guest0
        movl    FCS_h0(%esi), %ecx      // ecx = old .host0
        movl    FCS_h1(%esi), %edx      // edx = old .host1
        movl    %eax, FCS_g0(%esi)      // new .guest0 = guest
        movl    %edx, FCS_h0(%esi)      // new .host0 = old .host1
        movl    %ebx, FCS_g1(%esi)      // new .guest1 = old .guest0
        movl    %ecx, FCS_h1(%esi)      // new .host1 = old .host0
        jmp     *%edx                   // goto old .host1 a.k.a. new .host0

2:      cmpl    %eax, FCS_g2(%esi)      // cmp against .guest2
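        jnz     3f      // (assumed: miss -> try way 3)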
        // hit at way 2; swap upwards
        addl    $1, VG_(stats__n_xIndir_hits2_32)
        movl    FCS_g1(%esi), %ebx
        movl    FCS_h1(%esi), %ecx
        movl    FCS_h2(%esi), %edx
        movl    %eax, FCS_g1(%esi)
        movl    %edx, FCS_h1(%esi)
        movl    %ebx, FCS_g2(%esi)
        movl    %ecx, FCS_h2(%esi)
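        jmp     *%edx   // (sketch) goto old .host2 a.k.a. new .host1
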
3:      cmpl    %eax, FCS_g3(%esi)      // cmp against .guest3
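        jnz     4f      // (assumed: miss -> fast lookup failed)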
        // hit at way 3; swap upwards
        addl    $1, VG_(stats__n_xIndir_hits3_32)
        movl    FCS_g2(%esi), %ebx
        movl    FCS_h2(%esi), %ecx
        movl    FCS_h3(%esi), %edx
        movl    %eax, FCS_g2(%esi)
        movl    %edx, FCS_h2(%esi)
        movl    %ebx, FCS_g3(%esi)
        movl    %ecx, FCS_h3(%esi)
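        jmp     *%edx   // (sketch) goto old .host3 a.k.a. new .host2
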
4:      // fast lookup failed
        addl    $1, VG_(stats__n_xIndir_misses_32)

        movl    $VG_TRC_INNER_FASTMISS, %eax
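        movl    $0, %edx        // (sketch: no spare word for this exit)
        jmp     postamble
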
/* ------ Assisted jump ------ */
.global VG_(disp_cp_xassisted)
VG_(disp_cp_xassisted):
        /* %ebp contains the TRC */
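        /* (Sketch: hand the TRC in %ebp back as the primary word.) */
        movl    %ebp, %eax
        movl    $0, %edx
        jmp     postamble
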
/* ------ Event check failed ------ */
.global VG_(disp_cp_evcheck_fail)
VG_(disp_cp_evcheck_fail):
        movl    $VG_TRC_INNER_COUNTERZERO, %eax
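        movl    $0, %edx        // (sketch, mirroring the other exits)
        jmp     postamble
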
.size VG_(disp_run_translations), .-VG_(disp_run_translations)

#endif // defined(VGP_x86_solaris)

/* Let the linker know we don't need an executable stack */
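/* (Assumed: the MARK_STACK_NO_EXEC macro from pub_core_basics_asm.h.) */
MARK_STACK_NO_EXEC
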
/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/