2 /*---------------------------------------------------------------*/
3 /*--- begin guest_arm_defs.h ---*/
4 /*---------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
28 /* Only to be used within the guest-arm directory. */
30 #ifndef __VEX_GUEST_ARM_DEFS_H
31 #define __VEX_GUEST_ARM_DEFS_H
33 #include "libvex_basictypes.h"
34 #include "guest_generic_bb_to_IR.h" // DisResult
36 /*---------------------------------------------------------*/
37 /*--- arm to IR conversion ---*/
38 /*---------------------------------------------------------*/
40 /* Convert one ARM insn to IR. See the type DisOneInstrFn in
41 geust_generic_ bb_to_IR.h. */
43 DisResult
disInstr_ARM ( IRSB
* irbb
,
44 const UChar
* guest_code
,
48 const VexArchInfo
* archinfo
,
49 const VexAbiInfo
* abiinfo
,
50 VexEndness host_endness
,
53 /* Used by the optimiser to specialise calls to helpers. */
55 IRExpr
* guest_arm_spechelper ( const HChar
* function_name
,
57 IRStmt
** precedingStmts
,
58 Int n_precedingStmts
);
60 /* Describes to the optimser which part of the guest state require
61 precise memory exceptions. This is logically part of the guest
64 Bool
guest_arm_state_requires_precise_mem_exns ( Int
, Int
,
68 VexGuestLayout armGuest_layout
;
71 /*---------------------------------------------------------*/
72 /*--- arm guest helpers ---*/
73 /*---------------------------------------------------------*/
75 /* --- CLEAN HELPERS --- */
77 /* Calculate NZCV from the supplied thunk components, in the positions
78 they appear in the CPSR, viz bits 31:28 for N Z V C respectively.
79 Returned bits 27:0 are zero. */
81 UInt
armg_calculate_flags_nzcv ( UInt cc_op
, UInt cc_dep1
,
82 UInt cc_dep2
, UInt cc_dep3
);
84 /* Calculate the C flag from the thunk components, in the lowest bit
85 of the word (bit 0). */
87 UInt
armg_calculate_flag_c ( UInt cc_op
, UInt cc_dep1
,
88 UInt cc_dep2
, UInt cc_dep3
);
90 /* Calculate the V flag from the thunk components, in the lowest bit
91 of the word (bit 0). */
93 UInt
armg_calculate_flag_v ( UInt cc_op
, UInt cc_dep1
,
94 UInt cc_dep2
, UInt cc_dep3
);
96 /* Calculate the specified condition from the thunk components, in the
97 lowest bit of the word (bit 0). */
99 UInt
armg_calculate_condition ( UInt cond_n_op
/* ARMCondcode << 4 | cc_op */,
101 UInt cc_dep2
, UInt cc_dep3
);
103 /* Calculate the QC flag from the thunk components, in the lowest bit
104 of the word (bit 0). */
106 UInt
armg_calculate_flag_qc ( UInt resL1
, UInt resL2
,
107 UInt resR1
, UInt resR2
);
109 /* --- DIRTY HELPERS --- */
111 /* Confusingly, for the AES insns, the 32-bit ARM docs refers to the
112 one-and-only source register as 'm' whereas the 64-bit docs refer to
113 it as 'n'. We sidestep that here by just calling it 'arg32_*'. */
116 void armg_dirtyhelper_AESE (
118 UInt arg32_3
, UInt arg32_2
, UInt arg32_1
, UInt arg32_0
122 void armg_dirtyhelper_AESD (
124 UInt arg32_3
, UInt arg32_2
, UInt arg32_1
, UInt arg32_0
128 void armg_dirtyhelper_AESMC (
130 UInt arg32_3
, UInt arg32_2
, UInt arg32_1
, UInt arg32_0
134 void armg_dirtyhelper_AESIMC (
136 UInt arg32_3
, UInt arg32_2
, UInt arg32_1
, UInt arg32_0
140 void armg_dirtyhelper_SHA1C (
142 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
143 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
144 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
148 void armg_dirtyhelper_SHA1P (
150 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
151 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
152 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
156 void armg_dirtyhelper_SHA1M (
158 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
159 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
160 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
164 void armg_dirtyhelper_SHA1SU0 (
166 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
167 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
168 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
172 void armg_dirtyhelper_SHA256H (
174 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
175 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
176 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
180 void armg_dirtyhelper_SHA256H2 (
182 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
183 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
184 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
188 void armg_dirtyhelper_SHA256SU1 (
190 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
191 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
192 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
196 void armg_dirtyhelper_SHA1SU1 (
198 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
199 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
203 void armg_dirtyhelper_SHA256SU0 (
205 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
206 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
210 void armg_dirtyhelper_SHA1H (
212 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
216 void armg_dirtyhelper_VMULLP64 (
218 UInt argN1
, UInt argN0
, UInt argM1
, UInt argM0
222 /*---------------------------------------------------------*/
223 /*--- Condition code stuff ---*/
224 /*---------------------------------------------------------*/
/* Flags masks.  Defines positions of flags bits in the CPSR. */
#define ARMG_CC_SHIFT_N  31
#define ARMG_CC_SHIFT_Z  30
#define ARMG_CC_SHIFT_C  29
#define ARMG_CC_SHIFT_V  28
#define ARMG_CC_SHIFT_Q  27

/* Use an unsigned 1 here: '1 << 31' left-shifts into the sign bit of a
   signed int, which is undefined behaviour in C99/C11 (and the masks
   are only ever used as UInt values anyway). */
#define ARMG_CC_MASK_N  (1U << ARMG_CC_SHIFT_N)
#define ARMG_CC_MASK_Z  (1U << ARMG_CC_SHIFT_Z)
#define ARMG_CC_MASK_C  (1U << ARMG_CC_SHIFT_C)
#define ARMG_CC_MASK_V  (1U << ARMG_CC_SHIFT_V)
#define ARMG_CC_MASK_Q  (1U << ARMG_CC_SHIFT_Q)
/* Flag thunk descriptors.  A four-word thunk is used to record
   details of the most recent flag-setting operation, so NZCV can
   be computed later if needed.

   The fields are:

      CC_OP, which describes the operation.

      CC_DEP1, CC_DEP2, CC_DEP3.  These are arguments to the
      operation.  We want to set up the mcx_masks in flag helper calls
      involving these fields so that Memcheck "believes" that the
      resulting flags are data-dependent on both CC_DEP1 and
      CC_DEP2.  Hence the name DEP.

   When building the thunk, it is always necessary to write words into
   CC_DEP1/2/3, even if those args are not used given the
   CC_OP field.  This is important because otherwise Memcheck could
   give false positives as it does not understand the relationship
   between the CC_OP field and CC_DEP1/2/3, and so believes
   that the definedness of the stored flags always depends on
   all three DEP fields.

   Fields carrying only 1 or 2 bits of useful information (old_C,
   shifter_co, old_V, oldC:oldV) must have their top 31 or 30 bits
   (respectively) zero.  The text "31x0:" or "30x0:" denotes this.

   A summary of the field usages is:

      OP        DEP1              DEP2              DEP3
      ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      OP_COPY   curr_NZCV:28x0    unused            unused
      OP_ADD    argL              argR              unused
      OP_SUB    argL              argR              unused
      OP_ADC    argL              argR              31x0:old_C
      OP_SBB    argL              argR              31x0:old_C
      OP_LOGIC  result            31x0:shifter_co   31x0:old_V
      OP_MUL    result            unused            30x0:old_C:old_V
      OP_MULL   resLO32           resHI32           30x0:old_C:old_V
*/
/* NOTE(review): the 'enum {' opener and the closing of this enum were
   lost in the source text; the wrapper and the terminating
   ARMG_CC_OP_NUMBER count value are reconstructed — confirm against
   the users of these constants. */
enum {
   ARMG_CC_OP_COPY = 0, /* DEP1 = NZCV in 31:28, DEP2 = 0, DEP3 = 0
                           just copy DEP1 to output */

   ARMG_CC_OP_ADD,      /* DEP1 = argL (Rn), DEP2 = argR (shifter_op),
                           DEP3 = unused */

   ARMG_CC_OP_SUB,      /* DEP1 = argL (Rn), DEP2 = argR (shifter_op),
                           DEP3 = unused */

   ARMG_CC_OP_ADC,      /* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op),
                           DEP3 = oldC (in LSB) */

   ARMG_CC_OP_SBB,      /* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op),
                           DEP3 = oldC (in LSB) */

   ARMG_CC_OP_LOGIC,    /* DEP1 = result, DEP2 = shifter_carry_out (in LSB),
                           DEP3 = old V flag (in LSB) */

   ARMG_CC_OP_MUL,      /* DEP1 = result, DEP2 = 0, DEP3 = oldC:old_V
                           (in bits 1:0) */

   ARMG_CC_OP_MULL,     /* DEP1 = resLO32, DEP2 = resHI32, DEP3 = oldC:old_V
                           (in bits 1:0) */

   ARMG_CC_OP_NUMBER
};

/* XXXX because of the calling conventions for
   armg_calculate_condition, all these OP values MUST be in the range
   0 .. 15 only (viz, 4-bits). */
/* Defines conditions which we can ask for (ARM ARM 2e page A3-6) */
/* NOTE(review): the 'typedef enum {' opener and the closing
   '} ARMCondcode;' were lost in the source text and are reconstructed
   here; the type name is grounded by the 'ARMCondcode << 4 | cc_op'
   packing used by armg_calculate_condition above. */
typedef
   enum {
      ARMCondEQ = 0,  /* equal                         : Z=1 */
      ARMCondNE = 1,  /* not equal                     : Z=0 */

      ARMCondHS = 2,  /* >=u (higher or same)          : C=1 */
      ARMCondLO = 3,  /* <u  (lower)                   : C=0 */

      ARMCondMI = 4,  /* minus (negative)              : N=1 */
      ARMCondPL = 5,  /* plus (zero or +ve)            : N=0 */

      ARMCondVS = 6,  /* overflow                      : V=1 */
      ARMCondVC = 7,  /* no overflow                   : V=0 */

      ARMCondHI = 8,  /* >u   (higher)                 : C=1 && Z=0 */
      ARMCondLS = 9,  /* <=u  (lower or same)          : C=0 || Z=1 */

      ARMCondGE = 10, /* >=s (signed greater or equal) : N=V */
      ARMCondLT = 11, /* <s  (signed less than)        : N!=V */

      ARMCondGT = 12, /* >s  (signed greater)          : Z=0 && N=V */
      ARMCondLE = 13, /* <=s (signed less or equal)    : Z=1 || N!=V */

      ARMCondAL = 14, /* always (unconditional)        : 1 */
      ARMCondNV = 15  /* never (unconditional)         : 0 */
      /* NB: ARM have deprecated the use of the NV condition code.
         You are now supposed to use MOV R0,R0 as a noop rather than
         MOVNV R0,R0 as was previously recommended.  Future processors
         may have the NV condition code reused to do other things. */
   }
   ARMCondcode;
348 #endif /* ndef __VEX_GUEST_ARM_DEFS_H */
350 /*---------------------------------------------------------------*/
351 /*--- end guest_arm_defs.h ---*/
352 /*---------------------------------------------------------------*/