2 /*--------------------------------------------------------------------*/
3 /*--- The core dispatch loop, for jumping to a code address. ---*/
4 /*--- dispatch-ppc32-linux.S ---*/
5 /*--------------------------------------------------------------------*/
8 This file is part of Valgrind, a dynamic binary instrumentation framework.
11 Copyright (C) 2005-2017 Cerion Armour-Brown <cerion@open-works.co.uk>
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, see <http://www.gnu.org/licenses/>.
26 The GNU General Public License is contained in the file COPYING.
29 #include "pub_core_basics_asm.h"
31 #if defined(VGP_ppc32_linux)
33 #include "pub_core_dispatch_asm.h"
34 #include "pub_core_transtab_asm.h"
35 #include "libvex_guest_offsets.h" /* for OFFSET_ppc32_CIA */
38 /*------------------------------------------------------------*/
40 /*--- The dispatch loop. VG_(disp_run_translations) is ---*/
41 /*--- used to run all translations, ---*/
42 /*--- including no-redir ones. ---*/
44 /*------------------------------------------------------------*/
46 /*----------------------------------------------------*/
47 /*--- Entry and preamble (set everything up) ---*/
48 /*----------------------------------------------------*/
/*----------------------------------------------------------------
   VG_(disp_run_translations) -- enter the translation cache and
   run translations until one of the continuation points below is
   reached.

   Register contract on entry (SysV PPC32 ABI):
      r3 = two_words   : 2-word result buffer; the postamble fills
                         it in before returning to C land
      r4 = guest_state : guest state pointer; parked in r31, where
                         VEX-generated code expects to find it
      r5 = host_addr   : host code address to enter

   NOTE(review): this listing is elided -- runs of instructions
   between several of the comments below are not shown here.
  ----------------------------------------------------------------*/
51 void VG_(disp_run_translations)( UWord* two_words,
56 .globl VG_(disp_run_translations)
57 .type VG_(disp_run_translations), @function
58 VG_(disp_run_translations):
59 /* r3 holds two_words */
60 /* r4 holds guest_state */
61 /* r5 holds host_addr */
63 /* ----- entry point to VG_(disp_run_translations) ----- */
64 /* For Linux/ppc32 we need the SysV ABI, which uses
65 LR->4(parent_sp), CR->anywhere.
66 (The AIX ABI, used on Darwin,
67 uses LR->8(prt_sp), CR->4(prt_sp))
/* Allocate the whole 496-byte frame in one shot; stwu stores the
   back-chain pointer and drops sp atomically.  496 is a multiple
   of 16, keeping the SysV stack-alignment rule. */
75 stwu 1,-496(1) /* sp should maintain 16-byte alignment */
77 /* Save callee-saved registers... */
78 /* r3, r4, r5 are live here, so use r6 */
/* lis/lwz @ha/@l pair: materialise the high half of the address of
   the global flag VG_(machine_ppc32_has_FP), then load the word.
   Nonzero means the FP regs exist and must be saved. */
79 lis 6,VG_(machine_ppc32_has_FP)@ha
80 lwz 6,VG_(machine_ppc32_has_FP)@l(6)
84 /* Floating-point reg save area : 144 bytes */
105 /* General reg save area : 76 bytes */
125 stw 3,272(1) /* save two_words for later */
127 /* It's necessary to save/restore VRSAVE in the AIX / Darwin ABI.
128 The Linux kernel might not actually use VRSAVE for its intended
129 purpose, but it should be harmless to preserve anyway. */
130 /* r3, r4, r5 are live here, so use r6 */
/* Same @ha/@l trick for the AltiVec-present flag; only save the
   vector state if the hardware actually has it. */
131 lis 6,VG_(machine_ppc32_has_VMX)@ha
132 lwz 6,VG_(machine_ppc32_has_VMX)@l(6)
137 /* VRSAVE save word : 32 bytes */
138 mfspr 6,256 /* vrsave reg is spr number 256 */
141 /* Alignment padding : 4 bytes */
143 /* Vector reg save area (quadword aligned) : 192 bytes */
176 /* Local variable space... */
178 /* 32(sp) used later to check FPSCR[RM] */
180 /* r3 holds two_words */
181 /* r4 holds guest_state */
182 /* r5 holds host_addr */
184 /* 24(sp) used later to stop ctr reg being clobbered */
185 /* 20(sp) used later to load fpscr with zero */
188 /* Linkage Area (reserved)
193 /* set host FPU control word to the default mode expected
194 by VEX-generated code. See comments in libvex.h for
/* Re-read the FP-present flag (r6 was clobbered above). */
196 lis 6,VG_(machine_ppc32_has_FP)@ha
197 lwz 6,VG_(machine_ppc32_has_FP)@l(6)
201 /* get zero into f3 (tedious) */
202 /* note: fsub 3,3,3 is not a reliable way to do this,
203 since if f3 holds a NaN or similar then we don't necessarily
204 wind up with zero. */
/* mtfsf with mask 0xFF writes all eight FPSCR fields from f3
   (which holds zero at this point), i.e. FPSCR := 0. */
208 mtfsf 0xFF,3 /* fpscr = f3 */
211 /* set host AltiVec control word to the default mode expected
212 by VEX-generated code. */
213 lis 6,VG_(machine_ppc32_has_VMX)@ha
214 lwz 6,VG_(machine_ppc32_has_VMX)@l(6)
/* vspltisw splats the 5-bit immediate 0 across all lanes, giving
   an all-zero vector; moving that to VSCR clears non-Java mode. */
219 vspltisw 3,0x0 /* generate zero */
220 mtvscr 3 /* sets VSCR[NJ]=0 */
225 /* make a stack frame for the code we are calling */
228 /* Set up the guest state ptr */
229 mr 31,4 /* r31 (generated code gsp) = r4 */
231 /* and jump into the code cache. Chained translations in
232 the code cache run, until for whatever reason, they can't
233 continue. When that happens, the translation in question
234 will jump (or call) to one of the continuation points
235 VG_(cp_...) below. */
240 /*----------------------------------------------------*/
241 /*--- Postamble and exit. ---*/
242 /*----------------------------------------------------*/
/* Common exit path: verify the FPSCR/VSCR invariants the generated
   code is supposed to maintain, restore all callee-saved state
   saved by the preamble, stash the (r6,r7) result pair into
   *two_words, and return to the C caller. */
245 /* At this point, r6 and r7 contain two
246 words to be returned to the caller. r6
247 holds a TRC value, and r7 optionally may
248 hold another word (for CHAIN_ME exits, the
249 address of the place to patch.) */
251 /* We're leaving. Check that nobody messed with
252 VSCR or FPSCR in ways we don't expect. */
253 /* Using r10 - value used again further on, so don't trash! */
254 lis 10,VG_(machine_ppc32_has_FP)@ha
255 lwz 10,VG_(machine_ppc32_has_FP)@l(10)
257 /* Using r11 - value used again further on, so don't trash! */
258 lis 11,VG_(machine_ppc32_has_VMX)@ha
259 lwz 11,VG_(machine_ppc32_has_VMX)@l(11)
261 cmplwi 10,0 /* Do we have FP ? */
264 /* Set fpscr back to a known state, since vex-generated code
265 may have messed with fpscr[rm]. */
271 mtfsf 0xFF,3 /* fpscr = f3 */
274 cmplwi 11,0 /* Do we have altivec? */
278 /* Expect VSCR[NJ] to be 0, call invariant_violation if
/* Build the VSCR[NJ] mask 0x00010000 in each lane without loading
   from memory: splat 1s, splat 0s, then shift left two bytes. */
280 /* first generate 4x 0x00010000 */
281 vspltisw 4,0x1 /* 4x 0x00000001 */
282 vspltisw 5,0x0 /* zero */
283 vsldoi 6,4,5,0x2 /* <<2*8 => 4x 0x00010000 */
284 /* retrieve VSCR and mask wanted bits */
286 vand 7,7,6 /* gives NJ flag */
287 vspltw 7,7,0x3 /* flags-word to all lanes */
/* vcmpequw. (dot form) sets CR6; bit 24 of CR is 1 iff every lane
   compared equal, i.e. NJ is set when it should be clear. */
288 vcmpequw. 8,6,7 /* CR[24] = 1 if v6 == v7 */
289 bt 24,invariant_violation /* branch if all_equal */
293 /* otherwise we're OK */
/* invariant_violation path: hand the caller a TRC saying the
   FPSCR/VSCR invariant was broken.  (The label itself and the
   branch to the exit sequence are elided in this listing.) */
297 li 6,VG_TRC_INVARIANT_FAILED
302 /* Restore FP regs */
303 /* r10 already holds VG_(machine_ppc32_has_FP) value */
307 /* Floating-point regs */
328 /* r11 already holds VG_(machine_ppc32_has_VMX) value */
332 /* Restore Altivec regs */
336 mfspr 4,256 /* VRSAVE reg is spr number 256 */
366 /* restore int regs, including importantly r3 (two_words) */
388 /* Stash return values */
392 /* restore lr & sp, and leave */
/* 500(sp) = 496-byte frame + 4: the LR save word in the caller's
   frame, per the SysV PPC32 convention noted in the preamble. */
393 lwz 0,500(1) /* stack_size + 4 */
395 addi 1,1,496 /* stack_size */
399 /*----------------------------------------------------*/
400 /*--- Continuation points ---*/
401 /*----------------------------------------------------*/
403 /* ------ Chain me to slow entry point ------ */
404 .global VG_(disp_cp_chain_me_to_slowEP)
405 VG_(disp_cp_chain_me_to_slowEP):
/* Reached via a call from a translation that wants to be chained
   to another translation's slow entry point.  LR therefore points
   just past the patchable call site. */
406 /* We got called. The return address indicates
407 where the patching needs to happen. Collect
408 the return address and exit back to C land,
409 handing the caller the pair (Chain_me_S, RA) */
410 li 6, VG_TRC_CHAIN_ME_TO_SLOW_EP
/* Size accounting used to back LR up to the start of the patchable
   call sequence (the mflr/subtraction are elided in this listing
   -- TODO confirm exact byte counts against the full source): */
412 /* 8 = imm32-fixed2 r30, disp_cp_chain_me_to_slowEP
419 /* ------ Chain me to fast entry point ------ */
420 .global VG_(disp_cp_chain_me_to_fastEP)
421 VG_(disp_cp_chain_me_to_fastEP):
/* Identical in shape to the slowEP case above, but reports
   VG_TRC_CHAIN_ME_TO_FAST_EP so the patcher targets the fast
   entry point instead. */
422 /* We got called. The return address indicates
423 where the patching needs to happen. Collect
424 the return address and exit back to C land,
425 handing the caller the pair (Chain_me_F, RA) */
426 li 6, VG_TRC_CHAIN_ME_TO_FAST_EP
/* Size accounting for the patchable call sequence, as in the
   slowEP case (subtraction elided in this listing): */
428 /* 8 = imm32-fixed2 r30, disp_cp_chain_me_to_fastEP
435 /* ------ Indirect but boring jump ------ */
436 .global VG_(disp_cp_xindir)
/* Indirect-branch continuation: look the guest target address up
   in the 4-way set-associative fast translation cache.  On a hit,
   the entry is moved toward way 0 (an LRU-ish promotion) and we
   jump straight to the cached host code; on a miss we exit to C
   with VG_TRC_INNER_FASTMISS. */
438 /* Where are we going? */
439 lwz 20, OFFSET_ppc32_CIA(31)
/* Bump VG_(stats__n_xIndirs_32): address formed via @ha/@l pair
   (the load/increment/store itself is elided in this listing). */
442 lis 24, VG_(stats__n_xIndirs_32)@ha
443 addi 24, 24, VG_(stats__n_xIndirs_32)@l
448 // LIVE: r31 (guest state ptr), r20 (guest address to go to).
449 // We use 6 temporaries:
450 // r26 (to point at the relevant FastCacheSet),
451 // r21, r22, r23 (scratch, for swapping entries within a set)
452 // r24, r25 (other scratch)
454 /* Try a fast lookup in the translation cache. This is pretty much
455 a handcoded version of VG_(lookupInFastCache). */
457 // Compute r26 = VG_TT_FAST_HASH(guest)
// Guest insns are word-aligned, so the low 2 bits carry no
// information; the hash folds the high bits onto the low ones.
458 srwi 26, 20, 2 // g2 = guest >> 2
459 srwi 25, 20, (VG_TT_FAST_BITS + 2) // (g2 >> VG_TT_FAST_BITS)
460 xor 26, 26, 25 // (g2 >> VG_TT_FAST_BITS) ^ g2
461 andi. 26, 26, VG_TT_FAST_MASK // setNo
/* NOTE(review): the comment below says r6, but the code builds the
   set pointer in r25/r26 -- presumably a stale comment; confirm
   against the full source. */
463 // Compute r6 = &VG_(tt_fast)[r6]
464 lis 25, VG_(tt_fast)@ha
465 addi 25, 25, VG_(tt_fast)@l
// Scale setNo by the set size; the add of the base (r25) into r26
// is elided in this listing.
466 slwi 26, 26, VG_FAST_CACHE_SET_BITS
469 // LIVE: r31 (guest state ptr), r20 (guest addr), r26 (cache set)
// Way 0: a hit here needs no reordering -- just jump to .host0
// (comparison branch and jump elided in this listing).
471 lwz 24, FCS_g0(26) // .guest0
472 lwz 25, FCS_h0(26) // .host0
473 cmpw 24, 20 // cmp against .guest0
483 cmpw 24, 20 // cmp against .guest1
485 // hit at way 1; swap upwards
// Promote the hit entry to way 0, demoting the old way-0 entry to
// way 1, so the hottest targets stay cheapest to find.
486 lwz 21, FCS_g0(26) // 21 = old .guest0
487 lwz 22, FCS_h0(26) // 22 = old .host0
488 lwz 23, FCS_h1(26) // 23 = old .host1
489 stw 20, FCS_g0(26) // new .guest0 = guest
490 stw 23, FCS_h0(26) // new .host0 = old .host1
491 stw 21, FCS_g1(26) // new .guest1 = old .guest0
492 stw 22, FCS_h1(26) // new .host1 = old .host0
494 lis 24, VG_(stats__n_xIndir_hits1_32)@ha
495 addi 24, 24, VG_(stats__n_xIndir_hits1_32)@l
499 // goto old .host1 a.k.a. new .host0
// Ways 2 and 3 follow the same pattern: compare, swap the hit
// entry one way upwards, bump the per-way hit counter, and jump
// (swap bodies elided in this listing).
506 cmpw 24, 20 // cmp against .guest2
508 // hit at way 2; swap upwards
517 lis 24, VG_(stats__n_xIndir_hits2_32)@ha
518 addi 24, 24, VG_(stats__n_xIndir_hits2_32)@l
522 // goto old .host2 a.k.a. new .host1
529 cmpw 24, 20 // cmp against .guest3
531 // hit at way 3; swap upwards
540 lis 24, VG_(stats__n_xIndir_hits3_32)@ha
541 addi 24, 24, VG_(stats__n_xIndir_hits3_32)@l
545 // goto old .host3 a.k.a. new .host2
550 4: // fast lookup failed:
// Miss in all four ways: count it and exit to the C dispatcher,
// which will do the full (slow) lookup or translate afresh.
552 lis 24, VG_(stats__n_xIndir_misses_32)@ha
553 addi 24, 24, VG_(stats__n_xIndir_misses_32)@l
558 li 6, VG_TRC_INNER_FASTMISS
563 /* ------ Assisted jump ------ */
564 .global VG_(disp_cp_xassisted)
565 VG_(disp_cp_xassisted):
/* The translation has already stored its TRC in r31; presumably
   the (elided) code moves it into r6 for the common exit path --
   confirm against the full source. */
566 /* r31 contains the TRC */
571 /* ------ Event check failed ------ */
572 .global VG_(disp_cp_evcheck_fail)
573 VG_(disp_cp_evcheck_fail):
/* The event counter hit zero: report VG_TRC_INNER_COUNTERZERO via
   r6 so the outer loop can service events before resuming. */
574 li 6,VG_TRC_INNER_COUNTERZERO
/* Mark the symbol's extent for the ELF symbol table; everything
   from the entry label to here belongs to the one function. */
579 .size VG_(disp_run_translations), .-VG_(disp_run_translations)
581 #endif // defined(VGP_ppc32_linux)
583 /* Let the linker know we don't need an executable stack */
586 /*--------------------------------------------------------------------*/
588 /*--------------------------------------------------------------------*/