2 /*---------------------------------------------------------------*/
3 /*--- begin guest_arm_defs.h ---*/
4 /*---------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
28 /* Only to be used within the guest-arm directory. */
30 #ifndef __VEX_GUEST_ARM_DEFS_H
31 #define __VEX_GUEST_ARM_DEFS_H
33 #include "libvex_basictypes.h"
34 #include "guest_generic_bb_to_IR.h" // DisResult
36 /*---------------------------------------------------------*/
37 /*--- arm to IR conversion ---*/
38 /*---------------------------------------------------------*/
40 /* Convert one ARM insn to IR. See the type DisOneInstrFn in
   guest_generic_bb_to_IR.h. */
43 DisResult
disInstr_ARM ( IRSB
* irbb
,
44 Bool (*resteerOkFn
) ( void*, Addr
),
46 void* callback_opaque
,
47 const UChar
* guest_code
,
51 const VexArchInfo
* archinfo
,
52 const VexAbiInfo
* abiinfo
,
53 VexEndness host_endness
,
56 /* Used by the optimiser to specialise calls to helpers. */
58 IRExpr
* guest_arm_spechelper ( const HChar
* function_name
,
60 IRStmt
** precedingStmts
,
61 Int n_precedingStmts
);
/* Describes to the optimiser which parts of the guest state require
   precise memory exceptions.  This is logically part of the guest
   state description. */
67 Bool
guest_arm_state_requires_precise_mem_exns ( Int
, Int
,
71 VexGuestLayout armGuest_layout
;
74 /*---------------------------------------------------------*/
75 /*--- arm guest helpers ---*/
76 /*---------------------------------------------------------*/
78 /* --- CLEAN HELPERS --- */
80 /* Calculate NZCV from the supplied thunk components, in the positions
   they appear in the CPSR, viz bits 31:28 for N Z C V respectively.
82 Returned bits 27:0 are zero. */
84 UInt
armg_calculate_flags_nzcv ( UInt cc_op
, UInt cc_dep1
,
85 UInt cc_dep2
, UInt cc_dep3
);
87 /* Calculate the C flag from the thunk components, in the lowest bit
88 of the word (bit 0). */
90 UInt
armg_calculate_flag_c ( UInt cc_op
, UInt cc_dep1
,
91 UInt cc_dep2
, UInt cc_dep3
);
93 /* Calculate the V flag from the thunk components, in the lowest bit
94 of the word (bit 0). */
96 UInt
armg_calculate_flag_v ( UInt cc_op
, UInt cc_dep1
,
97 UInt cc_dep2
, UInt cc_dep3
);
99 /* Calculate the specified condition from the thunk components, in the
100 lowest bit of the word (bit 0). */
102 UInt
armg_calculate_condition ( UInt cond_n_op
/* ARMCondcode << 4 | cc_op */,
104 UInt cc_dep2
, UInt cc_dep3
);
106 /* Calculate the QC flag from the thunk components, in the lowest bit
107 of the word (bit 0). */
109 UInt
armg_calculate_flag_qc ( UInt resL1
, UInt resL2
,
110 UInt resR1
, UInt resR2
);
112 /* --- DIRTY HELPERS --- */
114 /* Confusingly, for the AES insns, the 32-bit ARM docs refers to the
115 one-and-only source register as 'm' whereas the 64-bit docs refer to
116 it as 'n'. We sidestep that here by just calling it 'arg32_*'. */
119 void armg_dirtyhelper_AESE (
121 UInt arg32_3
, UInt arg32_2
, UInt arg32_1
, UInt arg32_0
125 void armg_dirtyhelper_AESD (
127 UInt arg32_3
, UInt arg32_2
, UInt arg32_1
, UInt arg32_0
131 void armg_dirtyhelper_AESMC (
133 UInt arg32_3
, UInt arg32_2
, UInt arg32_1
, UInt arg32_0
137 void armg_dirtyhelper_AESIMC (
139 UInt arg32_3
, UInt arg32_2
, UInt arg32_1
, UInt arg32_0
143 void armg_dirtyhelper_SHA1C (
145 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
146 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
147 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
151 void armg_dirtyhelper_SHA1P (
153 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
154 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
155 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
159 void armg_dirtyhelper_SHA1M (
161 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
162 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
163 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
167 void armg_dirtyhelper_SHA1SU0 (
169 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
170 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
171 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
175 void armg_dirtyhelper_SHA256H (
177 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
178 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
179 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
183 void armg_dirtyhelper_SHA256H2 (
185 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
186 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
187 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
191 void armg_dirtyhelper_SHA256SU1 (
193 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
194 UInt argN3
, UInt argN2
, UInt argN1
, UInt argN0
,
195 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
199 void armg_dirtyhelper_SHA1SU1 (
201 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
202 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
206 void armg_dirtyhelper_SHA256SU0 (
208 UInt argD3
, UInt argD2
, UInt argD1
, UInt argD0
,
209 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
213 void armg_dirtyhelper_SHA1H (
215 UInt argM3
, UInt argM2
, UInt argM1
, UInt argM0
219 void armg_dirtyhelper_VMULLP64 (
221 UInt argN1
, UInt argN0
, UInt argM1
, UInt argM0
225 /*---------------------------------------------------------*/
226 /*--- Condition code stuff ---*/
227 /*---------------------------------------------------------*/
229 /* Flags masks. Defines positions of flags bits in the CPSR. */
#define ARMG_CC_SHIFT_N  31
#define ARMG_CC_SHIFT_Z  30
#define ARMG_CC_SHIFT_C  29
#define ARMG_CC_SHIFT_V  28
#define ARMG_CC_SHIFT_Q  27

/* Unsigned constants: (1 << 31) left-shifts into the sign bit of a
   signed int, which is undefined behaviour in C.  1U keeps the value
   0x80000000 well defined; bit patterns are unchanged. */
#define ARMG_CC_MASK_N  (1U << ARMG_CC_SHIFT_N)
#define ARMG_CC_MASK_Z  (1U << ARMG_CC_SHIFT_Z)
#define ARMG_CC_MASK_C  (1U << ARMG_CC_SHIFT_C)
#define ARMG_CC_MASK_V  (1U << ARMG_CC_SHIFT_V)
#define ARMG_CC_MASK_Q  (1U << ARMG_CC_SHIFT_Q)
242 /* Flag thunk descriptors. A four-word thunk is used to record
243 details of the most recent flag-setting operation, so NZCV can
244 be computed later if needed.
248 CC_OP, which describes the operation.
250 CC_DEP1, CC_DEP2, CC_DEP3. These are arguments to the
   operation.  We want to set up the mcx_masks in flag helper calls
252 involving these fields so that Memcheck "believes" that the
253 resulting flags are data-dependent on both CC_DEP1 and
254 CC_DEP2. Hence the name DEP.
256 When building the thunk, it is always necessary to write words into
257 CC_DEP1/2/3, even if those args are not used given the
258 CC_OP field. This is important because otherwise Memcheck could
259 give false positives as it does not understand the relationship
260 between the CC_OP field and CC_DEP1/2/3, and so believes
   that the definedness of the stored flags always depends on all
   three DEP values.
264 Fields carrying only 1 or 2 bits of useful information (old_C,
265 shifter_co, old_V, oldC:oldV) must have their top 31 or 30 bits
266 (respectively) zero. The text "31x0:" or "30x0:" denotes this.
   A summary of the field usages is:

     OP                DEP1              DEP2              DEP3
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     OP_COPY           curr_NZCV:28x0    unused            unused
     OP_ADD            argL              argR              unused
     OP_SUB            argL              argR              unused
     OP_ADC            argL              argR              31x0:old_C
     OP_SBB            argL              argR              31x0:old_C
     OP_LOGIC          result            31x0:shifter_co   31x0:old_V
     OP_MUL            result            unused            30x0:old_C:old_V
     OP_MULL           resLO32           resHI32           30x0:old_C:old_V
*/
/* Thunk CC_OP values.  NOTE(review): the 'enum {' wrapper and the
   trailing ARMG_CC_OP_NUMBER enumerator were restored; the extracted
   text had lost them.  Values are sequential from 0, as required by
   the armg_calculate_condition packing noted below. */
enum {
   ARMG_CC_OP_COPY = 0,  /* DEP1 = NZCV in 31:28, DEP2 = 0, DEP3 = 0
                            just copy DEP1 to output */

   ARMG_CC_OP_ADD,       /* DEP1 = argL (Rn), DEP2 = argR (shifter_op),
                            DEP3 = 0 */

   ARMG_CC_OP_SUB,       /* DEP1 = argL (Rn), DEP2 = argR (shifter_op),
                            DEP3 = 0 */

   ARMG_CC_OP_ADC,       /* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op),
                            DEP3 = oldC (in LSB) */

   ARMG_CC_OP_SBB,       /* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op),
                            DEP3 = oldC (in LSB) */

   ARMG_CC_OP_LOGIC,     /* DEP1 = result, DEP2 = shifter_carry_out (in LSB),
                            DEP3 = old V flag (in LSB) */

   ARMG_CC_OP_MUL,       /* DEP1 = result, DEP2 = 0, DEP3 = oldC:old_V
                            (in bits 1:0) */

   ARMG_CC_OP_MULL,      /* DEP1 = resLO32, DEP2 = resHI32, DEP3 = oldC:old_V
                            (in bits 1:0) */

   ARMG_CC_OP_NUMBER
};

/* XXXX because of the calling conventions for
   armg_calculate_condition, all this OP values MUST be in the range
   0 .. 15 only (viz, 4-bits). */
317 /* Defines conditions which we can ask for (ARM ARM 2e page A3-6) */
/* ARM condition codes (ARM ARM 2e page A3-6).  NOTE(review): the
   'typedef enum {' wrapper and '} ARMCondcode;' closer were restored;
   the extracted text had lost them.  All enumerator values are
   explicit in the source. */
typedef
   enum {
      ARMCondEQ     = 0,  /* equal                         : Z=1 */
      ARMCondNE     = 1,  /* not equal                     : Z=0 */

      ARMCondHS     = 2,  /* >=u (higher or same)          : C=1 */
      ARMCondLO     = 3,  /* <u  (lower)                   : C=0 */

      ARMCondMI     = 4,  /* minus (negative)              : N=1 */
      ARMCondPL     = 5,  /* plus (zero or +ve)            : N=0 */

      ARMCondVS     = 6,  /* overflow                      : V=1 */
      ARMCondVC     = 7,  /* no overflow                   : V=0 */

      ARMCondHI     = 8,  /* >u   (higher)                 : C=1 && Z=0 */
      ARMCondLS     = 9,  /* <=u  (lower or same)          : C=0 || Z=1 */

      ARMCondGE     = 10, /* >=s (signed greater or equal) : N=V */
      ARMCondLT     = 11, /* <s  (signed less than)        : N!=V */

      ARMCondGT     = 12, /* >s  (signed greater)          : Z=0 && N=V */
      ARMCondLE     = 13, /* <=s (signed less or equal)    : Z=1 || N!=V */

      ARMCondAL     = 14, /* always (unconditional)        : 1 */
      ARMCondNV     = 15  /* never (unconditional):        : 0 */
      /* NB: ARM have deprecated the use of the NV condition code.
         You are now supposed to use MOV R0,R0 as a noop rather than
         MOVNV R0,R0 as was previously recommended.  Future processors
         may have the NV condition code reused to do other things. */
   }
   ARMCondcode;
351 #endif /* ndef __VEX_GUEST_ARM_DEFS_H */
353 /*---------------------------------------------------------------*/
354 /*--- end guest_arm_defs.h ---*/
355 /*---------------------------------------------------------------*/