2 /*--------------------------------------------------------------------*/
3 /*--- The core dispatch loop, for jumping to a code address. ---*/
4 /*--- dispatch-ppc64-linux.S ---*/
5 /*--------------------------------------------------------------------*/
8 This file is part of Valgrind, a dynamic binary instrumentation
11 Copyright (C) 2005-2013 Cerion Armour-Brown <cerion@open-works.co.uk>
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 The GNU General Public License is contained in the file COPYING.
31 #if defined(VGP_ppc64le_linux)
33 #include "pub_core_basics_asm.h"
34 #include "pub_core_dispatch_asm.h"
35 #include "pub_core_transtab_asm.h"
36 #include "libvex_guest_offsets.h" /* for OFFSET_ppc64_CIA */
38 /* NOTE: PPC64 supports Big Endian and Little Endian. It also supports the
39 ELF version 1 and ELF version 2 APIs.
41 Currently LE uses ELF version 2 and BE uses ELF version 1. However,
42 BE and LE may support the other ELF version in the future. So, the
43 _CALL_ELF is used in the assembly function to enable code for a
44 specific ELF version independently of the Endianness of the machine.
45 The test "#if _CALL_ELF == 2" checks if ELF version 2 is being used.
48 /* References to globals via the TOC */
/* Declarations of globals referenced from the dispatcher, plus one TOC
   (Table Of Contents) entry per global so they can be reached with a
   single `ld rN, .tocent__...@toc(2)` off the TOC pointer (r2). */
51 .globl vgPlain_tt_fast
52 .lcomm vgPlain_tt_fast,4,4
53 .type vgPlain_tt_fast, @object
/* TOC entry for the fast translation-cache table VG_(tt_fast). */
56 .tocent__vgPlain_tt_fast:
57 .tc vgPlain_tt_fast[TC],vgPlain_tt_fast
/* TOC entry for the "indirect transfers" statistics counter. */
58 .tocent__vgPlain_stats__n_xindirs_32:
59 .tc vgPlain_stats__n_xindirs_32[TC],vgPlain_stats__n_xindirs_32
/* TOC entry for the "fast-cache miss" statistics counter. */
60 .tocent__vgPlain_stats__n_xindir_misses_32:
61 .tc vgPlain_stats__n_xindir_misses_32[TC],vgPlain_stats__n_xindir_misses_32
/* TOC entry for the has-AltiVec flag, tested before touching VRSAVE/VSCR. */
62 .tocent__vgPlain_machine_ppc64_has_VMX:
63 .tc vgPlain_machine_ppc64_has_VMX[TC],vgPlain_machine_ppc64_has_VMX
65 /*------------------------------------------------------------*/
67 /*--- The dispatch loop. VG_(disp_run_translations) is ---*/
68 /*--- used to run all translations, ---*/
69 /*--- including no-redir ones. ---*/
71 /*------------------------------------------------------------*/
73 /*----------------------------------------------------*/
74 /*--- Entry and preamble (set everything up) ---*/
75 /*----------------------------------------------------*/
78 void VG_(disp_run_translations)( UWord* two_words,
/*--------------------------------------------------------------------
   VG_(disp_run_translations)
   In:   r3 = two_words (out-param pair returned to C caller)
         r4 = guest_state pointer
         r5 = host_addr (address of translation to run)
   Saves all callee-saved integer/FP/vector state, sets FPSCR (and, if
   AltiVec is present, VSCR) to the mode VEX-generated code expects,
   points r31 at the guest state, and jumps into the code cache.
   Control returns via the VG_(disp_cp_*) continuation points below.

   NOTE(review): both the ELFv1 entry convention (function descriptor
   .quad + dot-symbol) and the ELFv2 one (.localentry + r12-based TOC
   setup) appear below; the `#if _CALL_ELF == 2` guards described in the
   file header appear to be elided in this excerpt — confirm against the
   full file, since two identical labels cannot assemble unguarded.
  --------------------------------------------------------------------*/
85 .globl VG_(disp_run_translations)
87 .type VG_(disp_run_translations),@function
88 VG_(disp_run_translations):
89 .type .VG_(disp_run_translations),@function
93 VG_(disp_run_translations):
/* ELFv1 function descriptor: entry point, TOC base, environment. */
94 .quad .VG_(disp_run_translations),.TOC.@tocbase,0
96 .type .VG_(disp_run_translations),@function
98 .globl .VG_(disp_run_translations)
99 .VG_(disp_run_translations):
/* ELFv2: compute TOC pointer (r2) from entry address in r12. */
101 0: addis 2, 12,.TOC.-0b@ha
103 .localentry VG_(disp_run_translations), .-VG_(disp_run_translations)
106 /* r3 holds two_words */
107 /* r4 holds guest_state */
108 /* r5 holds host_addr */
110 /* ----- entry point to VG_(disp_run_translations) ----- */
111 /* PPC64 ABI saves LR->16(prt_sp), CR->8(prt_sp)) */
119 /* New stack frame */
120 stdu 1,-624(1) /* sp should maintain 16-byte alignment */
122 /* General reg save area : 152 bytes */
142 std 3,104(1) /* save two_words for later */
144 /* Save callee-saved registers... */
145 /* Floating-point reg save area : 144 bytes */
165 /* It's necessary to save/restore VRSAVE in the AIX / Darwin ABI.
166 The Linux kernel might not actually use VRSAVE for its intended
167 purpose, but it should be harmless to preserve anyway. */
168 /* r3, r4, r5 are live here, so use r6 */
/* r6 = &VG_(machine_ppc64_has_VMX); skip vector save if no AltiVec. */
169 ld 6,.tocent__vgPlain_machine_ppc64_has_VMX@toc(2)
174 /* VRSAVE save word : 32 bytes */
175 mfspr 6,256 /* vrsave reg is spr number 256 */
178 /* Alignment padding : 4 bytes */
180 /* Vector reg save area (quadword aligned) : 192 bytes */
207 /* Local variable space... */
209 /* r3 holds two_words */
210 /* r4 holds guest_state */
211 /* r5 holds host_addr */
213 /* 96(sp) used later to check FPSCR[RM] */
214 /* 88(sp) used later to load fpscr with zero */
217 /* Linkage Area (reserved) BE ABI
219 32(sp) : link editor doubleword
220 24(sp) : compiler doubleword
226 /* set host FPU control word to the default mode expected
227 by VEX-generated code. See comments in libvex.h for
229 /* => get zero into f3 (tedious)
230 fsub 3,3,3 is not a reliable way to do this, since if
231 f3 holds a NaN or similar then we don't necessarily
232 wind up with zero. */
236 mtfsf 0xFF,3 /* fpscr = lo32 of f3 */
238 /* set host AltiVec control word to the default mode expected
239 by VEX-generated code. */
240 ld 6,.tocent__vgPlain_machine_ppc64_has_VMX@toc(2)
245 vspltisw 3,0x0 /* generate zero */
249 /* make a stack frame for the code we are calling */
252 /* Set up the guest state ptr */
253 mr 31,4 /* r31 (generated code gsp) = r4 */
255 /* for the LE ABI need to setup r2 and r12 */
256 0: addis 2, 12,.TOC.-0b@ha
260 /* and jump into the code cache. Chained translations in
261 the code cache run, until for whatever reason, they can't
262 continue. When that happens, the translation in question
263 will jump (or call) to one of the continuation points
264 VG_(cp_...) below. */
269 /*----------------------------------------------------*/
270 /*--- Postamble and exit. ---*/
271 /*----------------------------------------------------*/
274 /* At this point, r6 and r7 contain two
275 words to be returned to the caller. r6
276 holds a TRC value, and r7 optionally may
277 hold another word (for CHAIN_ME exits, the
278 address of the place to patch.) */
280 /* undo the "make a stack frame for the code we are calling" */
283 /* We're leaving. Check that nobody messed with
284 VSCR or FPSCR in ways we don't expect. */
285 /* Using r11 - value used again further on, so don't trash! */
286 ld 11,.tocent__vgPlain_machine_ppc64_has_VMX@toc(2)
289 /* Set fpscr back to a known state, since vex-generated code
290 may have messed with fpscr[rm]. */
296 mtfsf 0xFF,3 /* fpscr = f3 */
298 cmpldi 11,0 /* Do we have altivec? */
301 /* Check VSCR[NJ] == 1 */
302 /* first generate 4x 0x00010000 */
303 vspltisw 4,0x1 /* 4x 0x00000001 */
304 vspltisw 5,0x0 /* zero */
305 vsldoi 6,4,5,0x2 /* <<2*8 => 4x 0x00010000 */
306 /* retrieve VSCR and mask wanted bits */
308 vand 7,7,6 /* gives NJ flag */
309 vspltw 7,7,0x3 /* flags-word to all lanes */
310 vcmpequw. 8,6,7 /* CR[24] = 1 if v6 == v7 */
311 bt 24,.invariant_violation /* branch if all_equal */
314 /* otherwise we're OK */
/* Exit path taken when FPSCR/VSCR were found corrupted above:
   report VG_TRC_INVARIANT_FAILED to the caller via r6. */
317 .invariant_violation:
318 li 6,VG_TRC_INVARIANT_FAILED
323 /* r11 already holds VG_(machine_ppc64_has_VMX) value */
327 /* Restore Altivec regs.
328 Use r5 as scratch since r6/r7 are live. */
331 mfspr 5,256 /* VRSAVE reg is spr number 256 */
360 /* Restore FP regs */
361 /* Floating-point regs */
381 /* restore int regs, including importantly r3 (two_words) */
402 /* Stash return values */
406 /* restore lr & sp, and leave */
407 ld 0,632(1) /* stack_size + 8 */
409 ld 0,640(1) /* stack_size + 16 */
411 addi 1,1,624 /* stack_size */
414 .size VG_(disp_run_translations),.-VG_(disp_run_translations)
418 /*----------------------------------------------------*/
419 /*--- Continuation points ---*/
420 /*----------------------------------------------------*/
422 /* ------ Chain me to slow entry point ------ */
/* Continuation point: a translation wants to be chained to the slow
   entry point of its successor.  Returns to the dispatcher postamble
   with r6 = VG_TRC_CHAIN_ME_TO_SLOW_EP and the patch address in r7. */
425 .globl VG_(disp_cp_chain_me_to_slowEP)
427 .type VG_(disp_cp_chain_me_to_slowEP),@function
428 VG_(disp_cp_chain_me_to_slowEP):
432 VG_(disp_cp_chain_me_to_slowEP):
/* ELFv1 function descriptor (see header note on _CALL_ELF). */
433 .quad .VG_(disp_cp_chain_me_to_slowEP),.TOC.@tocbase,0
436 .type .VG_(disp_cp_chain_me_to_slowEP),@function
437 .globl .VG_(disp_cp_chain_me_to_slowEP)
438 .VG_(disp_cp_chain_me_to_slowEP):
/* ELFv2: rebuild TOC pointer (r2) from entry address in r12. */
440 0: addis 2, 12,.TOC.-0b@ha
442 .localentry VG_(disp_cp_chain_me_to_slowEP), .-VG_(disp_cp_chain_me_to_slowEP)
444 /* We got called. The return address indicates
445 where the patching needs to happen. Collect
446 the return address and, exit back to C land,
447 handing the caller the pair (Chain_me_S, RA) */
448 li 6, VG_TRC_CHAIN_ME_TO_SLOW_EP
450 /* 20 = imm64-fixed5 r30, disp_cp_chain_me_to_slowEP
457 .size VG_(disp_cp_chain_me_to_slowEP),.-VG_(disp_cp_chain_me_to_slowEP)
460 /* ------ Chain me to fast entry point ------ */
/* Continuation point: a translation wants to be chained to the fast
   entry point of its successor.  Returns to the dispatcher postamble
   with r6 = VG_TRC_CHAIN_ME_TO_FAST_EP and the patch address in r7. */
463 .globl VG_(disp_cp_chain_me_to_fastEP)
465 .type VG_(disp_cp_chain_me_to_fastEP),@function
466 VG_(disp_cp_chain_me_to_fastEP):
470 VG_(disp_cp_chain_me_to_fastEP):
/* ELFv1 function descriptor (see header note on _CALL_ELF). */
471 .quad .VG_(disp_cp_chain_me_to_fastEP),.TOC.@tocbase,0
474 .type .VG_(disp_cp_chain_me_to_fastEP),@function
475 .globl .VG_(disp_cp_chain_me_to_fastEP)
476 .VG_(disp_cp_chain_me_to_fastEP):
/* ELFv2: rebuild TOC pointer (r2) from entry address in r12. */
478 0: addis 2, 12,.TOC.-0b@ha
480 .localentry VG_(disp_cp_chain_me_to_fastEP), .-VG_(disp_cp_chain_me_to_fastEP)
482 /* We got called. The return address indicates
483 where the patching needs to happen. Collect
484 the return address and, exit back to C land,
485 handing the caller the pair (Chain_me_F, RA) */
486 li 6, VG_TRC_CHAIN_ME_TO_FAST_EP
488 /* 20 = imm64-fixed5 r30, disp_cp_chain_me_to_fastEP
495 .size VG_(disp_cp_chain_me_to_fastEP),.-VG_(disp_cp_chain_me_to_fastEP)
498 /* ------ Indirect but boring jump ------ */
/* Continuation point for an ordinary indirect jump: hash the guest
   target address (from guest_state CIA), probe VG_(tt_fast), and jump
   straight to the cached host code on a hit; on a miss, exit to C land
   with r6 = VG_TRC_INNER_FASTMISS. */
501 .globl VG_(disp_cp_xindir)
503 .type VG_(disp_cp_xindir),@function
/* ELFv1 function descriptor (see header note on _CALL_ELF). */
509 .quad .VG_(disp_cp_xindir),.TOC.@tocbase,0
512 .type .VG_(disp_cp_xindir),@function
513 .globl .VG_(disp_cp_xindir)
514 .VG_(disp_cp_xindir):
/* ELFv2: rebuild TOC pointer (r2) from entry address in r12. */
516 0: addis 2, 12,.TOC.-0b@ha
518 .localentry VG_(disp_cp_xindir), .-VG_(disp_cp_xindir)
520 /* Where are we going? */
521 ld 3,OFFSET_ppc64_CIA(31)
/* bump VG_(stats__n_xindirs_32) (32-bit counter, via TOC) */
524 ld 5, .tocent__vgPlain_stats__n_xindirs_32@toc(2)
529 /* r5 = &VG_(tt_fast) */
530 ld 5, .tocent__vgPlain_tt_fast@toc(2) /* &VG_(tt_fast) */
532 /* try a fast lookup in the translation cache */
533 /* r4 = VG_TT_FAST_HASH(addr) * sizeof(FastCacheEntry)
534 = ((r3 >>u 2) & VG_TT_FAST_MASK) << 4 */
535 rldicl 4,3, 62, 64-VG_TT_FAST_BITS /* entry# */
536 sldi 4,4,4 /* entry# * sizeof(FastCacheEntry) */
537 add 5,5,4 /* & VG_(tt_fast)[entry#] */
538 ld 6,0(5) /* .guest */
539 ld 7,8(5) /* .host */
541 bne .fast_lookup_failed
543 /* Found a match. Jump to .host. */
547 .size VG_(disp_cp_xindir),.-VG_(disp_cp_xindir)
/* Miss path (NOTE(review): the .fast_lookup_failed label itself is not
   visible in this excerpt): count the miss, then leave with
   VG_TRC_INNER_FASTMISS so C-land does the slow lookup. */
552 ld 5, .tocent__vgPlain_stats__n_xindir_misses_32@toc(2)
557 li 6,VG_TRC_INNER_FASTMISS
562 /* ------ Assisted jump ------ */
/* Continuation point for an assisted transfer: the translation has
   already placed the TRC in r31; just exit back to C land with it. */
565 .globl VG_(disp_cp_xassisted)
567 .type VG_(disp_cp_xassisted),@function
568 VG_(disp_cp_xassisted):
572 VG_(disp_cp_xassisted):
/* ELFv1 function descriptor (see header note on _CALL_ELF). */
573 .quad .VG_(disp_cp_xassisted),.TOC.@tocbase,0
/* ELFv2: rebuild TOC pointer (r2) from entry address in r12. */
577 0: addis 2, 12,.TOC.-0b@ha
579 .localentry VG_(disp_cp_xassisted), .-VG_(disp_cp_xassisted)
581 .type .VG_(disp_cp_xassisted),@function
582 .globl .VG_(disp_cp_xassisted)
583 .VG_(disp_cp_xassisted):
584 /* r31 contains the TRC */
589 .size VG_(disp_cp_xassisted),.-VG_(disp_cp_xassisted)
592 /* ------ Event check failed ------ */
/* Continuation point taken when a translation's event-counter check
   fails: exit to C land with r6 = VG_TRC_INNER_COUNTERZERO. */
595 .globl VG_(disp_cp_evcheck_fail)
597 .type VG_(disp_cp_evcheck_fail),@function
598 VG_(disp_cp_evcheck_fail):
602 VG_(disp_cp_evcheck_fail):
/* ELFv1 function descriptor (see header note on _CALL_ELF). */
603 .quad .VG_(disp_cp_evcheck_fail),.TOC.@tocbase,0
/* ELFv2: rebuild TOC pointer (r2) from entry address in r12. */
607 0: addis 2, 12,.TOC.-0b@ha
609 .localentry VG_(disp_cp_evcheck_fail), .-VG_(disp_cp_evcheck_fail)
611 .type .VG_(disp_cp_evcheck_fail),@function
612 .globl .VG_(disp_cp_evcheck_fail)
613 .VG_(disp_cp_evcheck_fail):
614 li 6,VG_TRC_INNER_COUNTERZERO
618 .size VG_(disp_cp_evcheck_fail),.-VG_(disp_cp_evcheck_fail)
/* Size of the ELFv1 dot-symbol (local entry) for disp_run_translations. */
621 .size .VG_(disp_run_translations), .-.VG_(disp_run_translations)
623 /* Let the linker know we don't need an executable stack */
/* Without this empty note section, GNU ld may mark the stack RWX. */
624 .section .note.GNU-stack,"",@progbits
626 #endif // defined(VGP_ppc64le_linux)
628 /*--------------------------------------------------------------------*/
630 /*--------------------------------------------------------------------*/