/*--------------------------------------------------------------------*/
/*--- The core dispatch loop, for jumping to a code address.       ---*/
/*---                                       dispatch-ppc64-linux.S ---*/
/*--------------------------------------------------------------------*/

/*
  This file is part of Valgrind, a dynamic binary instrumentation
  framework.

  Copyright (C) 2005-2017 Cerion Armour-Brown <cerion@open-works.co.uk>

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/

#include "pub_core_basics_asm.h"

#if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)

#include "pub_core_dispatch_asm.h"
#include "pub_core_transtab_asm.h"
#include "libvex_guest_offsets.h"       /* for OFFSET_ppc64_CIA */

/* NOTE: PPC64 supports Big Endian and Little Endian.  It also supports
   the ELF version 1 and ELF version 2 ABIs.

   Currently LE uses ELF version 2 and BE uses ELF version 1.  However,
   BE and LE may support the other ELF version in the future.  So,
   _CALL_ELF is used in the assembly functions to enable code for a
   specific ELF version independently of the endianness of the machine.
   The test "#if _CALL_ELF == 2" checks if ELF version 2 is being used.
*/
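
/* For reference, a sketch of the per-ABI entry-point pattern used
   repeatedly below (illustrative only; the real sequences follow):

     #if _CALL_ELF == 2                 -- ELF v2: no descriptor; compute
     0: addis 2,12,.TOC.-0b@ha          -- the TOC pointer from r12 at the
        addi  2,2,.TOC.-0b@l            -- global entry point, and mark
        .localentry name, .-name        -- the local entry point
     #else                              -- ELF v1: code lives at the dot-
        .quad .name,.TOC.@tocbase,0     -- symbol; 'name' is a 3-doubleword
     #endif                             -- function descriptor in .opd
*/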

/* References to globals via the TOC */

        .section ".toc","aw"
        .globl  vgPlain_tt_fast
        .lcomm  vgPlain_tt_fast,4,4
        .type   vgPlain_tt_fast, @object
.tocent__vgPlain_tt_fast:
        .tc vgPlain_tt_fast[TC],vgPlain_tt_fast
.tocent__vgPlain_stats__n_xindirs_32:
        .tc vgPlain_stats__n_xindirs_32[TC],vgPlain_stats__n_xindirs_32
.tocent__vgPlain_stats__n_xindir_misses_32:
        .tc vgPlain_stats__n_xindir_misses_32[TC],vgPlain_stats__n_xindir_misses_32
.tocent__vgPlain_machine_ppc64_has_VMX:
        .tc vgPlain_machine_ppc64_has_VMX[TC],vgPlain_machine_ppc64_has_VMX
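
/* Illustrative use (a sketch, mirroring the loads further down):

        ld      5,.tocent__vgPlain_tt_fast@toc(2)

   fetches the address of VG_(tt_fast) from the TOC entry declared
   above, via the TOC pointer in r2. */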

/*------------------------------------------------------------*/
/*---                                                      ---*/
/*--- The dispatch loop.  VG_(disp_run_translations) is    ---*/
/*--- used to run all translations,                        ---*/
/*--- including no-redir ones.                             ---*/
/*---                                                      ---*/
/*------------------------------------------------------------*/

/*----------------------------------------------------*/
/*--- Entry and preamble (set everything up)       ---*/
/*----------------------------------------------------*/

/* signature:
void VG_(disp_run_translations)( UWord* two_words,
                                 void*  guest_state,
                                 Addr   host_addr );
*/
        .section ".text"
        .align   2
        .globl   VG_(disp_run_translations)
#if _CALL_ELF == 2
        .type    VG_(disp_run_translations),@function
VG_(disp_run_translations):
#else
        .section ".opd","aw"
        .align   3
VG_(disp_run_translations):
        .quad    .VG_(disp_run_translations),.TOC.@tocbase,0
        .previous
        .type    .VG_(disp_run_translations),@function
#endif
        .globl   .VG_(disp_run_translations)
.VG_(disp_run_translations):
#if _CALL_ELF == 2
0:      addis    2,12,.TOC.-0b@ha
        addi     2,2,.TOC.-0b@l
        .localentry VG_(disp_run_translations), .-VG_(disp_run_translations)
#endif
        /* r3 holds two_words   */
        /* r4 holds guest_state */
        /* r5 holds host_addr   */

        /* ----- entry point to VG_(disp_run_translations) ----- */
        /* PPC64 ABI saves LR->16(prt_sp), CR->8(prt_sp) */

        /* Save lr, cr */
        mflr    6
        std     6,16(1)
        mfcr    6
        std     6,8(1)

        /* New stack frame */
        stdu    1,-624(1)   /* sp should maintain 16-byte alignment */

        /* General reg save area : 152 bytes */
        std     3,104(1)    /* save two_words for later */

        /* Save callee-saved registers... */
        /* Floating-point reg save area : 144 bytes */

        /* It's necessary to save/restore VRSAVE in the AIX / Darwin ABI.
           The Linux kernel might not actually use VRSAVE for its intended
           purpose, but it should be harmless to preserve anyway. */
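        /* (A sketch of the save/restore pairing, where SLOT is an
           illustrative stand-in for the real stack offset, which is
           elided here:
               mfspr  6,256        ; read VRSAVE (spr 256)
               stw    6,SLOT(1)    ; save it in the frame
               ...
               lwz    5,SLOT(1)
               mtspr  256,5        ; write it back on exit
        */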
        /* r3, r4, r5 are live here, so use r6 */
        ld      6,.tocent__vgPlain_machine_ppc64_has_VMX@toc(2)

        /* VRSAVE save word : 32 bytes */
        mfspr   6,256         /* vrsave reg is spr number 256 */

        /* Alignment padding : 4 bytes */

        /* Vector reg save area (quadword aligned) : 192 bytes */

        /* Local variable space... */

        /* r3 holds two_words   */
        /* r4 holds guest_state */
        /* r5 holds host_addr   */

        /* 96(sp) used later to check FPSCR[RM] */
        /* 88(sp) used later to load fpscr with zero */

        /* Linkage Area (reserved)  BE ABI
           40(sp) : TOC
           32(sp) : link editor doubleword
           24(sp) : compiler doubleword
           16(sp) : LR
           8(sp)  : CR
           0(sp)  : back-chain
        */

        /* set host FPU control word to the default mode expected
           by VEX-generated code.  See comments in libvex.h for
           what that mode is. */
        /* => get zero into f3 (tedious)
           fsub 3,3,3 is not a reliable way to do this, since if
           f3 holds a NaN or similar then we don't necessarily
           wind up with zero. */
        li      6,0
        stw     6,88(1)
        lfs     3,88(1)       /* 88(sp): the zero slot noted above */
        mtfsf   0xFF,3        /* fpscr = lo32 of f3 */

        /* set host AltiVec control word to the default mode expected
           by VEX-generated code. */
        ld      6,.tocent__vgPlain_machine_ppc64_has_VMX@toc(2)

        vspltisw 3,0x0        /* generate zero */
        mtvscr  3             /* VSCR = 0 */

        /* make a stack frame for the code we are calling */
        stdu    1,-48(1)

        /* Set up the guest state ptr */
        mr      31,4          /* r31 (generated code gsp) = r4 */
#if _CALL_ELF == 2
        /* for the LE ABI need to setup r2 and r12 */
0:      addis   2,12,.TOC.-0b@ha
        addi    2,2,.TOC.-0b@l
#endif

        /* and jump into the code cache.  Chained translations in
           the code cache run, until for whatever reason, they can't
           continue.  When that happens, the translation in question
           will jump (or call) to one of the continuation points
           VG_(cp_...) below. */
        mtctr   5
        bctr
        /*NOTREACHED*/

/*----------------------------------------------------*/
/*--- Postamble and exit.                          ---*/
/*----------------------------------------------------*/

.postamble:
        /* At this point, r6 and r7 contain two
           words to be returned to the caller.  r6
           holds a TRC value, and r7 optionally may
           hold another word (for CHAIN_ME exits, the
           address of the place to patch.) */
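
        /* Caller's view of this exit protocol (a sketch in C, using the
           signature above; 'trc' and 'payload' are illustrative names):

               UWord two_words[2];
               VG_(disp_run_translations)(two_words, guest_state, host_addr);
               UWord trc     = two_words[0];  // from r6
               UWord payload = two_words[1];  // from r7 (e.g. the patch
                                              // address for CHAIN_ME exits)
        */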

        /* undo the "make a stack frame for the code we are calling" */
        addi    1,1,48

        /* We're leaving.  Check that nobody messed with
           VSCR or FPSCR in ways we don't expect. */
        /* Using r11 - value used again further on, so don't trash! */
        ld      11,.tocent__vgPlain_machine_ppc64_has_VMX@toc(2)
        lwz     11,0(11)

        /* Set fpscr back to a known state, since vex-generated code
           may have messed with fpscr[rm]. */
        mtfsf   0xFF,3        /* fpscr = f3 */

        cmpldi  11,0          /* Do we have altivec? */

        /* Check VSCR[NJ] == 1 */
        /* first generate 4x 0x00010000 */
        vspltisw  4,0x1          /* 4x 0x00000001 */
        vspltisw  5,0x0          /* zero */
        vsldoi    6,4,5,0x2      /* <<2*8 => 4x 0x00010000 */
        /* retrieve VSCR and mask wanted bits */
        mfvscr    7
        vand      7,7,6          /* gives NJ flag */
        vspltw    7,7,0x3        /* flags-word to all lanes */
        vcmpequw. 8,6,7          /* CR[24] = 1 if v6 == v7 */
        bt        24,.invariant_violation /* branch if all_equal */

        /* otherwise we're OK */
        b       .remove_frame

.invariant_violation:
        li      6,VG_TRC_INVARIANT_FAILED
        li      7,0
        /* fall through */

.remove_frame:
        /* r11 already holds VG_(machine_ppc64_has_VMX) value */

        /* Restore Altivec regs.
           Use r5 as scratch since r6/r7 are live. */
        mfspr   5,256          /* VRSAVE reg is spr number 256 */

        /* Restore FP regs */
        /* Floating-point regs */

        /* restore int regs, including importantly r3 (two_words) */

        /* Stash return values */
        std     6,0(3)
        std     7,8(3)

        /* restore lr & sp, and leave */
        ld      0,632(1)       /* stack_size + 8 */
        mtcr    0
        ld      0,640(1)       /* stack_size + 16 */
        mtlr    0
        addi    1,1,624        /* stack_size */
        blr

#if _CALL_ELF == 2
        .size VG_(disp_run_translations),.-VG_(disp_run_translations)
#endif

/*----------------------------------------------------*/
/*--- Continuation points                          ---*/
/*----------------------------------------------------*/
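
/* Common exit protocol for the stubs below (as described at
   .postamble): leave a TRC value in r6 and an optional payload word
   in r7 (zero when unused), then branch to .postamble, which hands
   the pair back to VG_(disp_run_translations)'s caller via the
   two_words out-parameter. */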

/* ------ Chain me to slow entry point ------ */
        .globl   VG_(disp_cp_chain_me_to_slowEP)
#if _CALL_ELF == 2
        .type    VG_(disp_cp_chain_me_to_slowEP),@function
VG_(disp_cp_chain_me_to_slowEP):
#else
        .section ".opd","aw"
        .align   3
VG_(disp_cp_chain_me_to_slowEP):
        .quad    .VG_(disp_cp_chain_me_to_slowEP),.TOC.@tocbase,0
        .previous
        .type    .VG_(disp_cp_chain_me_to_slowEP),@function
#endif
        .globl   .VG_(disp_cp_chain_me_to_slowEP)
.VG_(disp_cp_chain_me_to_slowEP):
#if _CALL_ELF == 2
0:      addis    2,12,.TOC.-0b@ha
        addi     2,2,.TOC.-0b@l
        .localentry VG_(disp_cp_chain_me_to_slowEP), .-VG_(disp_cp_chain_me_to_slowEP)
#endif

        /* We got called.  The return address indicates
           where the patching needs to happen.  Collect
           the return address and exit back to C land,
           handing the caller the pair (Chain_me_S, RA) */
        li      6, VG_TRC_CHAIN_ME_TO_SLOW_EP
        mflr    7
        /* 20 = imm64-fixed5 r30, disp_cp_chain_me_to_slowEP
            4 = mtctr r30
            4 = btctr
        */
        subi    7,7,20+4+4
        b       .postamble
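
        /* For the 20+4+4 above: per the comment, the patchable call
           site is assumed to look like

               <5-insn imm64 load into r30>    20 bytes
               mtctr 30                         4 bytes
               bctrl                            4 bytes

           and LR points just past the bctrl, so backing up 28 bytes
           yields the start of the sequence to be patched. */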
        .size VG_(disp_cp_chain_me_to_slowEP),.-VG_(disp_cp_chain_me_to_slowEP)


/* ------ Chain me to fast entry point ------ */
        .globl   VG_(disp_cp_chain_me_to_fastEP)
#if _CALL_ELF == 2
        .type    VG_(disp_cp_chain_me_to_fastEP),@function
VG_(disp_cp_chain_me_to_fastEP):
#else
        .section ".opd","aw"
        .align   3
VG_(disp_cp_chain_me_to_fastEP):
        .quad    .VG_(disp_cp_chain_me_to_fastEP),.TOC.@tocbase,0
        .previous
        .type    .VG_(disp_cp_chain_me_to_fastEP),@function
#endif
        .globl   .VG_(disp_cp_chain_me_to_fastEP)
.VG_(disp_cp_chain_me_to_fastEP):
#if _CALL_ELF == 2
0:      addis    2,12,.TOC.-0b@ha
        addi     2,2,.TOC.-0b@l
        .localentry VG_(disp_cp_chain_me_to_fastEP), .-VG_(disp_cp_chain_me_to_fastEP)
#endif

        /* We got called.  The return address indicates
           where the patching needs to happen.  Collect
           the return address and exit back to C land,
           handing the caller the pair (Chain_me_F, RA) */
        li      6, VG_TRC_CHAIN_ME_TO_FAST_EP
        mflr    7
        /* 20 = imm64-fixed5 r30, disp_cp_chain_me_to_fastEP
            4 = mtctr r30
            4 = btctr
        */
        subi    7,7,20+4+4
        b       .postamble

        .size VG_(disp_cp_chain_me_to_fastEP),.-VG_(disp_cp_chain_me_to_fastEP)

/* ------ Indirect but boring jump ------ */
        .globl   VG_(disp_cp_xindir)
#if _CALL_ELF == 2
        .type    VG_(disp_cp_xindir),@function
VG_(disp_cp_xindir):
#else
        .section ".opd","aw"
        .align   3
VG_(disp_cp_xindir):
        .quad    .VG_(disp_cp_xindir),.TOC.@tocbase,0
        .previous
        .type    .VG_(disp_cp_xindir),@function
#endif
        .globl   .VG_(disp_cp_xindir)
.VG_(disp_cp_xindir):
#if _CALL_ELF == 2
0:      addis    2,12,.TOC.-0b@ha
        addi     2,2,.TOC.-0b@l
        .localentry VG_(disp_cp_xindir), .-VG_(disp_cp_xindir)
#endif

        /* Where are we going? */
        ld      3,OFFSET_ppc64_CIA(31)

        /* stats only */
        ld      5, .tocent__vgPlain_stats__n_xindirs_32@toc(2)
        lwz     6,0(5)
        addi    6,6,1
        stw     6,0(5)

        /* r5 = &VG_(tt_fast) */
        ld      5, .tocent__vgPlain_tt_fast@toc(2) /* &VG_(tt_fast) */

        /* try a fast lookup in the translation cache */
        /* r4 = VG_TT_FAST_HASH(addr) * sizeof(FastCacheEntry)
              = ((r3 >>u 2) & VG_TT_FAST_MASK)  << 4 */
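        /* Worked example (assuming, for illustration only, that
           VG_TT_FAST_BITS == 15, i.e. VG_TT_FAST_MASK == 0x7FFF):
               addr r3       = 0x10000A34
               r3 >>u 2      = 0x0400028D   (insns are 4-aligned)
               ...  & 0x7FFF = 0x28D        (entry number)
               ...  << 4     = 0x28D0       (byte offset: each
                                             FastCacheEntry is 16 bytes,
                                             .guest at +0, .host at +8) */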
        rldicl  4,3, 62, 64-VG_TT_FAST_BITS   /* entry# */
        sldi    4,4,4      /* entry# * sizeof(FastCacheEntry) */
        add     5,5,4      /* & VG_(tt_fast)[entry#] */
        ld      6,0(5)     /* .guest */
        ld      7,8(5)     /* .host */
        cmpd    3,6
        bne     .fast_lookup_failed

        /* Found a match.  Jump to .host. */
        mtctr   7
        bctr

        .size VG_(disp_cp_xindir),.-VG_(disp_cp_xindir)

.fast_lookup_failed:
        /* stats only */
        ld      5, .tocent__vgPlain_stats__n_xindir_misses_32@toc(2)
        lwz     6,0(5)
        addi    6,6,1
        stw     6,0(5)

        li      6,VG_TRC_INNER_FASTMISS
        li      7,0
        b       .postamble
        /*NOTREACHED*/

/* ------ Assisted jump ------ */
        .globl   VG_(disp_cp_xassisted)
#if _CALL_ELF == 2
        .type    VG_(disp_cp_xassisted),@function
VG_(disp_cp_xassisted):
#else
        .section ".opd","aw"
        .align   3
VG_(disp_cp_xassisted):
        .quad    .VG_(disp_cp_xassisted),.TOC.@tocbase,0
        .previous
#endif
#if _CALL_ELF == 2
0:      addis    2,12,.TOC.-0b@ha
        addi     2,2,.TOC.-0b@l
        .localentry VG_(disp_cp_xassisted), .-VG_(disp_cp_xassisted)
#endif
        .type    .VG_(disp_cp_xassisted),@function
        .globl   .VG_(disp_cp_xassisted)
.VG_(disp_cp_xassisted):
        /* r31 contains the TRC */
        mr      6,31
        li      7,0
        b       .postamble

        .size VG_(disp_cp_xassisted),.-VG_(disp_cp_xassisted)

/* ------ Event check failed ------ */
        .globl   VG_(disp_cp_evcheck_fail)
#if _CALL_ELF == 2
        .type    VG_(disp_cp_evcheck_fail),@function
VG_(disp_cp_evcheck_fail):
#else
        .section ".opd","aw"
        .align   3
VG_(disp_cp_evcheck_fail):
        .quad    .VG_(disp_cp_evcheck_fail),.TOC.@tocbase,0
        .previous
#endif
#if _CALL_ELF == 2
0:      addis    2,12,.TOC.-0b@ha
        addi     2,2,.TOC.-0b@l
        .localentry VG_(disp_cp_evcheck_fail), .-VG_(disp_cp_evcheck_fail)
#endif
        .type    .VG_(disp_cp_evcheck_fail),@function
        .globl   .VG_(disp_cp_evcheck_fail)
.VG_(disp_cp_evcheck_fail):
        li      6,VG_TRC_INNER_COUNTERZERO
        li      7,0
        b       .postamble

        .size VG_(disp_cp_evcheck_fail),.-VG_(disp_cp_evcheck_fail)

        .size .VG_(disp_run_translations), .-.VG_(disp_run_translations)

#endif // defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)

/* Let the linker know we don't need an executable stack */
MARK_STACK_NO_EXEC

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/