/*--------------------------------------------------------------------*/
/*--- The core dispatch loop, for jumping to a code address.      ---*/
/*---                                      dispatch-arm64-linux.S ---*/
/*--------------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2013-2013 OpenWorks

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#if defined(VGP_arm64_linux)

#include "pub_core_basics_asm.h"
#include "pub_core_dispatch_asm.h"
#include "pub_core_transtab_asm.h"
#include "libvex_guest_offsets.h"   /* for OFFSET_arm64_* */
/*------------------------------------------------------------*/
/*--- The dispatch loop.  VG_(disp_run_translations) is    ---*/
/*--- used to run all translations,                        ---*/
/*--- including no-redir ones.                             ---*/
/*------------------------------------------------------------*/
/*----------------------------------------------------*/
/*--- Entry and preamble (set everything up)       ---*/
/*----------------------------------------------------*/
/* signature:
void VG_(disp_run_translations)( UWord* two_words,
                                 void*  guest_state,
                                 Addr   host_addr );
*/
.text
.global VG_(disp_run_translations)
VG_(disp_run_translations):
        /* Push the callee-saved registers.  x19..x28 are
           callee-saved under AAPCS64, as are x29/x30.  Note this
           sequence maintains 16-alignment of sp.  Also save x0
           since it will be needed in the postamble. */
        stp  x29, x30, [sp, #-16]!
        stp  x27, x28, [sp, #-16]!
        stp  x25, x26, [sp, #-16]!
        stp  x23, x24, [sp, #-16]!
        stp  x21, x22, [sp, #-16]!
        stp  x19, x20, [sp, #-16]!
        stp  x0,  xzr, [sp, #-16]!
        /* Set FPCR to the vex-required default value. */
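        /* A minimal sketch of the elided code, assuming the
           vex-required default is FPCR == 0 (round-to-nearest,
           all trap enables clear): */
        mov  x8, #0
        msr  fpcr, x8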
        /* Set up the guest state pointer */
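        /* Sketch: per the signature above, guest_state arrives
           in x1; vex expects it in x21. */
        mov  x21, x1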
        /* and jump into the code cache.  Chained translations in
           the code cache run, until for whatever reason, they can't
           continue.  When that happens, the translation in question
           will jump (or call) to one of the continuation points
           VG_(disp_cp_...) below. */
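        /* Sketch: per the signature above, host_addr arrives in x2. */
        br   x2
        /*NOTREACHED*/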
/*----------------------------------------------------*/
/*--- Postamble and exit.                          ---*/
/*----------------------------------------------------*/
postamble:
        /* At this point, x1 and x2 contain two
           words to be returned to the caller.  x1
           holds a TRC value, and x2 optionally may
           hold another word (for CHAIN_ME exits, the
           address of the place to patch.) */
        /* We're leaving.  Check that nobody messed with
           FPCR in ways we don't expect.  (The disabled check
           below is left over from the 32-bit ARM dispatcher.) */
        // fmrx r4, fpscr
        // bic  r4, #0xF8000000 /* mask out NZCV and QC */
        // bic  r4, #0x0000009F /* mask out IDC,IXC,UFC,OFC,DZC,IOC */
        // cmp  r4, #0
        // beq  remove_frame /* we're OK */
        /* otherwise we have an invariant violation */
        // movw r1, #VG_TRC_INVARIANT_FAILED
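        /* A 64-bit sketch of the same check, assuming (as in the
           preamble sketch above) that the vex-required FPCR value
           is zero; not part of the original excerpt. */
        mrs  x4, fpcr
        cbz  x4, remove_frame      /* FPCR still at default: we're OK */
        /* otherwise we have an invariant violation */
        mov  x1, #VG_TRC_INVARIANT_FAILED
        mov  x2, #0
        /* fall through to remove_frame */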
remove_frame:
        /* Restore int regs, including importantly x0 (two_words). */
        ldp  x0,  xzr, [sp], #16
        ldp  x19, x20, [sp], #16
        ldp  x21, x22, [sp], #16
        ldp  x23, x24, [sp], #16
        ldp  x25, x26, [sp], #16
        ldp  x27, x28, [sp], #16
        ldp  x29, x30, [sp], #16
        /* Stash return values */
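        /* Sketch of the elided stores: x0 is the two_words pointer
           saved in the preamble, x1/x2 are the two result words. */
        str  x1, [x0, #0]
        str  x2, [x0, #8]
        ret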
/*----------------------------------------------------*/
/*--- Continuation points                          ---*/
/*----------------------------------------------------*/
/* ------ Chain me to slow entry point ------ */
.global VG_(disp_cp_chain_me_to_slowEP)
VG_(disp_cp_chain_me_to_slowEP):
        /* We got called.  The return address indicates
           where the patching needs to happen.  Collect
           the return address and exit back to C land,
           handing the caller the pair (Chain_me_S, RA). */
        mov  x1, #VG_TRC_CHAIN_ME_TO_SLOW_EP
        mov  x2, x30   // 30 == LR
        /* The patch site is 5 instructions (20 bytes) back from
           the return address:
           4 = movz x9, disp_cp_chain_me_to_slowEP[15:0]
           4 = movk x9, disp_cp_chain_me_to_slowEP[31:16], lsl 16
           4 = movk x9, disp_cp_chain_me_to_slowEP[47:32], lsl 32
           4 = movk x9, disp_cp_chain_me_to_slowEP[63:48], lsl 48
           4 = blr  x9
        */
        sub  x2, x2, #4+4+4+4+4
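        /* Not in the excerpt: return to C land via the postamble
           (a sketch of the obvious step). */
        b    postamble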
/* ------ Chain me to fast entry point ------ */
.global VG_(disp_cp_chain_me_to_fastEP)
VG_(disp_cp_chain_me_to_fastEP):
        /* We got called.  The return address indicates
           where the patching needs to happen.  Collect
           the return address and exit back to C land,
           handing the caller the pair (Chain_me_F, RA). */
        mov  x1, #VG_TRC_CHAIN_ME_TO_FAST_EP
        mov  x2, x30   // 30 == LR
        /* As above, the patch site is 5 instructions (20 bytes)
           back from the return address:
           4 = movz x9, disp_cp_chain_me_to_fastEP[15:0]
           4 = movk x9, disp_cp_chain_me_to_fastEP[31:16], lsl 16
           4 = movk x9, disp_cp_chain_me_to_fastEP[47:32], lsl 32
           4 = movk x9, disp_cp_chain_me_to_fastEP[63:48], lsl 48
           4 = blr  x9
        */
        sub  x2, x2, #4+4+4+4+4
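        /* Sketch: as above, exit via the postamble. */
        b    postamble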
/* ------ Indirect but boring jump ------ */
.global VG_(disp_cp_xindir)
VG_(disp_cp_xindir):
        /* Where are we going? */
        ldr  x0, [x21, #OFFSET_arm64_PC]
        /* stats only */
        adrp x1,           VG_(stats__n_xindirs_32)
        add  x1, x1, :lo12:VG_(stats__n_xindirs_32)
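        /* Sketch of the elided increment of the 32-bit counter
           whose address is now in x1: */
        ldr  w2, [x1, #0]
        add  w2, w2, #1
        str  w2, [x1, #0]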
        /* try a fast lookup in the translation cache */
        // x0 = next guest, x1,x2,x4,x5 scratch
        mov  x1, #VG_TT_FAST_MASK       // x1 = VG_TT_FAST_MASK
        and  x2, x1, x0, LSR #2         // x2 = entry # = (x1 & (x0 >> 2))

        adrp x4,           VG_(tt_fast)
        add  x4, x4, :lo12:VG_(tt_fast) // x4 = &VG_(tt_fast)

        add  x1, x4, x2, LSL #4         // x1 = &VG_(tt_fast)[entry#]

        ldp  x4, x5, [x1, #0]           // x4 = .guest, x5 = .host
        cmp  x4, x0
        // jump to host if lookup succeeded
        bne  fast_lookup_failed
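        /* Sketch of the elided hit path: continue in the code
           cache at the .host address loaded into x5 above. */
        br   x5
        /*NOTREACHED*/

fast_lookup_failed: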
        /* RM ME -- stats only */
        adrp x1,           VG_(stats__n_xindir_misses_32)
        add  x1, x1, :lo12:VG_(stats__n_xindir_misses_32)
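        /* Sketch of the elided miss-counter increment: */
        ldr  w2, [x1, #0]
        add  w2, w2, #1
        str  w2, [x1, #0]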
        mov  x1, #VG_TRC_INNER_FASTMISS
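        /* Sketch: report the fast-cache miss to the scheduler. */
        mov  x2, #0
        b    postamble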
/* ------ Assisted jump ------ */
.global VG_(disp_cp_xassisted)
VG_(disp_cp_xassisted):
        /* x21 contains the TRC */
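        /* Sketch: x21 (normally the guest state pointer) is reused
           to carry the TRC out of the code cache; return it through
           the postamble. */
        mov  x1, x21
        mov  x2, #0
        b    postamble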
/* ------ Event check failed ------ */
.global VG_(disp_cp_evcheck_fail)
VG_(disp_cp_evcheck_fail):
        mov  x1, #VG_TRC_INNER_COUNTERZERO
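        /* Sketch: as above, exit via the postamble. */
        mov  x2, #0
        b    postamble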
.size VG_(disp_run_translations), .-VG_(disp_run_translations)

/* Let the linker know we don't need an executable stack */
.section .note.GNU-stack,"",%progbits

#endif // defined(VGP_arm64_linux)
/*--------------------------------------------------------------------*/
/*--- end                                  dispatch-arm64-linux.S ---*/
/*--------------------------------------------------------------------*/