/*---------------------------------------------------------------*/
/*--- begin                                 guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
/* Only to be used within the guest-amd64 directory. */

#ifndef __VEX_GUEST_AMD64_DEFS_H
#define __VEX_GUEST_AMD64_DEFS_H

#include "libvex_basictypes.h"
#include "libvex_emnote.h"              // VexEmNote
#include "libvex_guest_amd64.h"         // VexGuestAMD64State
#include "guest_generic_bb_to_IR.h"     // DisResult
/*---------------------------------------------------------*/
/*--- amd64 to IR conversion                             ---*/
/*---------------------------------------------------------*/
/* Convert one amd64 insn to IR.  See the type DisOneInstrFn in
   guest_generic_bb_to_IR.h. */
extern
DisResult disInstr_AMD64 ( IRSB*        irsb_IN,
                           const UChar* guest_code_IN,
                           Long         delta,
                           Addr         guest_IP,
                           VexArch      guest_arch,
                           const VexArchInfo* archinfo,
                           const VexAbiInfo*  abiinfo,
                           VexEndness   host_endness_IN,
                           Bool         sigill_diag_IN );
/* Used by the optimiser to specialise calls to helpers. */
extern
IRExpr* guest_amd64_spechelper ( const HChar* function_name,
                                 IRExpr**     args,
                                 IRStmt**     precedingStmts,
                                 Int          n_precedingStmts );
/* Describes to the optimiser which parts of the guest state require
   precise memory exceptions.  This is logically part of the guest
   state description. */
extern
Bool guest_amd64_state_requires_precise_mem_exns ( Int, Int,
                                                   VexRegisterUpdates );

extern
VexGuestLayout amd64guest_layout;
/*---------------------------------------------------------*/
/*--- amd64 guest helpers                                ---*/
/*---------------------------------------------------------*/

/* --- CLEAN HELPERS --- */
extern ULong amd64g_calculate_rflags_all (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_rflags_c (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_condition (
                ULong/*AMD64Condcode*/ cond,
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );
extern ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl );

extern ULong amd64g_calculate_RCR (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_RCL (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_pclmul ( ULong a, ULong b, ULong which );
extern ULong amd64g_check_fldcw   ( ULong fpucw );
extern ULong amd64g_create_fpucw  ( ULong fpround );

extern ULong amd64g_check_ldmxcsr ( ULong mxcsr );
extern ULong amd64g_create_mxcsr  ( ULong sseround );

extern VexEmNote amd64g_dirtyhelper_FLDENV  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTOR  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTORS ( VexGuestAMD64State*, HWord );

extern void amd64g_dirtyhelper_FSTENV  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVE  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVES ( VexGuestAMD64State*, HWord );
/* Translate a guest virtual_addr into a guest linear address by
   consulting the supplied LDT/GDT structures.  Their representation
   must be as specified in pub/libvex_guest_amd64.h.  To indicate a
   translation failure, 1<<32 is returned.  On success, the lower 32
   bits of the returned result indicate the linear address.
*/
//ULong amd64g_use_seg_selector ( HWord ldt, HWord gdt,
//                                UInt seg_selector, UInt virtual_addr );
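/* Illustrative sketch (not part of the original interface): how a caller
   could decode the result convention described above, assuming the
   commented-out prototype were enabled.

      ULong res = amd64g_use_seg_selector(ldt, gdt, seg_selector, vaddr);
      if (res & (1ULL << 32)) {
         // translation failed
      } else {
         UInt linear_addr = (UInt)res;  // low 32 bits hold the linear address
      }
*/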
extern ULong amd64g_calculate_mmx_pmaddwd ( ULong, ULong );
extern ULong amd64g_calculate_mmx_psadbw  ( ULong, ULong );

extern ULong amd64g_calculate_sse_phminposuw ( ULong sLo, ULong sHi );

extern ULong amd64g_calc_crc32b ( ULong crcIn, ULong b );
extern ULong amd64g_calc_crc32w ( ULong crcIn, ULong w );
extern ULong amd64g_calc_crc32l ( ULong crcIn, ULong l );
extern ULong amd64g_calc_crc32q ( ULong crcIn, ULong q );
extern ULong amd64g_calc_mpsadbw ( ULong sHi, ULong sLo,
                                   ULong dHi, ULong dLo,
                                   ULong imm_and_return_control_bit );

extern ULong amd64g_calculate_pext ( ULong, ULong );
extern ULong amd64g_calculate_pdep ( ULong, ULong );
/* --- DIRTY HELPERS --- */

extern ULong amd64g_dirtyhelper_loadF80le  ( Addr/*addr*/ );

extern void  amd64g_dirtyhelper_storeF80le ( Addr/*addr*/, ULong/*data*/ );
extern void amd64g_dirtyhelper_CPUID_baseline       ( VexGuestAMD64State* st );
extern void amd64g_dirtyhelper_CPUID_sse3_and_cx16  ( VexGuestAMD64State* st );
extern void amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );
extern void amd64g_dirtyhelper_CPUID_avx_and_cx16 ( VexGuestAMD64State* st,
                                                    ULong hasF16C,
                                                    ULong hasRDRAND );

extern void amd64g_dirtyhelper_CPUID_avx2 ( VexGuestAMD64State* st,
                                            ULong hasF16C, ULong hasRDRAND,
                                            ULong hasRDSEED );
extern void amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );
extern void amd64g_dirtyhelper_XSAVE_COMPONENT_0
               ( VexGuestAMD64State* gst, HWord addr );
extern void amd64g_dirtyhelper_XSAVE_COMPONENT_1_EXCLUDING_XMMREGS
               ( VexGuestAMD64State* gst, HWord addr );

extern VexEmNote amd64g_dirtyhelper_XRSTOR_COMPONENT_0
               ( VexGuestAMD64State* gst, HWord addr );
extern VexEmNote amd64g_dirtyhelper_XRSTOR_COMPONENT_1_EXCLUDING_XMMREGS
               ( VexGuestAMD64State* gst, HWord addr );
extern ULong amd64g_dirtyhelper_RDTSC  ( void );
extern void  amd64g_dirtyhelper_RDTSCP ( VexGuestAMD64State* st );
extern ULong amd64g_dirtyhelper_IN  ( ULong portno, ULong sz/*1,2 or 4*/ );
extern void  amd64g_dirtyhelper_OUT ( ULong portno, ULong data,
                                      ULong sz/*1,2 or 4*/ );

extern void amd64g_dirtyhelper_SxDT ( void* address, ULong op /* 0 or 1 */ );
// This returns a 32-bit value from the host's RDRAND in bits 31:0, and the
// resulting C flag value in bit 32.
extern ULong amd64g_dirtyhelper_RDRAND ( void );

extern ULong amd64g_dirtyhelper_RDSEED ( void );
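/* Illustrative sketch (not part of the original header): unpacking the
   packed result convention described above for RDRAND.

      ULong r     = amd64g_dirtyhelper_RDRAND();
      UInt  value = (UInt)(r & 0xFFFFFFFFULL);  // bits 31:0 -- the random value
      UInt  cflag = (UInt)((r >> 32) & 1);      // bit 32    -- the resulting C flag
*/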
/* Helps with PCMP{I,E}STR{I,M}.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state for the xSTRM cases, no
   accesses of memory, is a pure function.

   opc_and_imm contains (4th byte of opcode << 8) | the-imm8-byte so
   the callee knows which I/E and I/M variant it is dealing with and
   what the specific operation is.  The 4th byte of the opcode is in
   the range 0x60 to 0x63.

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.

   For ESTRx variants, edxIN and eaxIN hold the values of those two
   registers.

   In all cases, the bottom 16 bits of the result contain the new
   OSZACP %rflags values.  For xSTRI variants, bits[31:16] of the
   result hold the new %ecx value.  For xSTRM variants, the helper
   writes the result directly to the guest XMM0.

   Declarable side effects: in all cases, reads guest state at
   [gstOffL, +16) and [gstOffR, +16).  For xSTRM variants, also writes
   guest XMM0.

   Is expected to be called with opc_and_imm combinations which have
   actually been validated, and will assert if otherwise.  The front
   end should ensure we're only called with verified values.
*/
extern ULong amd64g_dirtyhelper_PCMPxSTRx (
          VexGuestAMD64State*,
          HWord opc_and_imm,
          HWord gstOffL, HWord gstOffR,
          HWord edxIN, HWord eaxIN
       );
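/* Illustrative sketch (not part of the original header): unpacking the
   result convention described above for an xSTRI variant.  The argument
   names are placeholders.

      ULong res       = amd64g_dirtyhelper_PCMPxSTRx(gst, opc_and_imm,
                                                     gstOffL, gstOffR,
                                                     edxIN, eaxIN);
      ULong new_flags = res & 0xFFFFULL;          // new OSZACP %rflags bits
      ULong new_ecx   = (res >> 16) & 0xFFFFULL;  // xSTRI only: new %ecx value
*/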
/* Implementation of Intel AES instructions as described in the
   Intel Advanced Vector Extensions Programming Reference.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no
   accesses of memory, is a pure function.

   opc4 contains the 4th byte of opcode.  Front-end should only
   give opcode corresponding to AESENC/AESENCLAST/AESDEC/AESDECLAST/AESIMC
   (will assert otherwise).

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs, gstOffD is the guest state offset for the XMM register
   output.  We never have to deal with the memory case since that is handled
   by pre-loading the relevant value into the fake XMM16 register.
*/
extern void amd64g_dirtyhelper_AES (
          VexGuestAMD64State* gst,
          HWord opc4, HWord gstOffD,
          HWord gstOffL, HWord gstOffR
       );
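/* Illustrative sketch (not part of the original header): how the guest
   state offsets could be produced, e.g. for AESENC xmm1, xmm2/m128.  The
   guest_YMM* field names are assumed from libvex_guest_amd64.h.

      HWord gstOffD = offsetof(VexGuestAMD64State, guest_YMM1);  // destination
      HWord gstOffL = offsetof(VexGuestAMD64State, guest_YMM1);  // left input
      HWord gstOffR = offsetof(VexGuestAMD64State, guest_YMM2);  // right input, or
                      // guest_YMM16 if the operand was pre-loaded from memory
      amd64g_dirtyhelper_AES(gst, opc4, gstOffD, gstOffL, gstOffR);
*/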
/* Implementation of AESKEYGENASSIST.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 1 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no
   accesses of memory, is a pure function.

   imm8 is the Round Key constant.

   gstOffL and gstOffR are the guest state offsets for the XMM register
   input and the XMM register output.  We never have to deal with the
   memory case since that is handled by pre-loading the relevant value
   into the fake XMM16 register.
*/
extern void amd64g_dirtyhelper_AESKEYGENASSIST (
          VexGuestAMD64State* gst,
          HWord imm8,
          HWord gstOffL, HWord gstOffR
       );
//extern void amd64g_dirtyhelper_CPUID_sse0 ( VexGuestAMD64State* );
//extern void amd64g_dirtyhelper_CPUID_sse1 ( VexGuestAMD64State* );
//extern void amd64g_dirtyhelper_CPUID_sse2 ( VexGuestAMD64State* );

//extern void amd64g_dirtyhelper_FSAVE ( VexGuestAMD64State*, HWord );

//extern VexEmNote
//            amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );

//extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );

//extern VexEmNote
//            amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );
/*---------------------------------------------------------*/
/*--- Condition code stuff                               ---*/
/*---------------------------------------------------------*/
#define AMD64G_CC_SHIFT_O   11
#define AMD64G_CC_SHIFT_S   7
#define AMD64G_CC_SHIFT_Z   6
#define AMD64G_CC_SHIFT_A   4
#define AMD64G_CC_SHIFT_C   0
#define AMD64G_CC_SHIFT_P   2

#define AMD64G_CC_MASK_O    (1ULL << AMD64G_CC_SHIFT_O)
#define AMD64G_CC_MASK_S    (1ULL << AMD64G_CC_SHIFT_S)
#define AMD64G_CC_MASK_Z    (1ULL << AMD64G_CC_SHIFT_Z)
#define AMD64G_CC_MASK_A    (1ULL << AMD64G_CC_SHIFT_A)
#define AMD64G_CC_MASK_C    (1ULL << AMD64G_CC_SHIFT_C)
#define AMD64G_CC_MASK_P    (1ULL << AMD64G_CC_SHIFT_P)
/* additional rflags masks */
#define AMD64G_CC_SHIFT_ID  21
#define AMD64G_CC_SHIFT_AC  18
#define AMD64G_CC_SHIFT_D   10

#define AMD64G_CC_MASK_ID   (1ULL << AMD64G_CC_SHIFT_ID)
#define AMD64G_CC_MASK_AC   (1ULL << AMD64G_CC_SHIFT_AC)
#define AMD64G_CC_MASK_D    (1ULL << AMD64G_CC_SHIFT_D)
/* FPU flag masks (x87 status word condition code bits) */
#define AMD64G_FC_SHIFT_C3   14
#define AMD64G_FC_SHIFT_C2   10
#define AMD64G_FC_SHIFT_C1   9
#define AMD64G_FC_SHIFT_C0   8

#define AMD64G_FC_MASK_C3    (1ULL << AMD64G_FC_SHIFT_C3)
#define AMD64G_FC_MASK_C2    (1ULL << AMD64G_FC_SHIFT_C2)
#define AMD64G_FC_MASK_C1    (1ULL << AMD64G_FC_SHIFT_C1)
#define AMD64G_FC_MASK_C0    (1ULL << AMD64G_FC_SHIFT_C0)
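/* Illustrative sketch (not part of the original header): testing
   individual flags in a value returned by amd64g_calculate_rflags_all,
   using the masks defined above.

      ULong rflags = amd64g_calculate_rflags_all(cc_op, cc_dep1,
                                                 cc_dep2, cc_ndep);
      Bool zf = (rflags & AMD64G_CC_MASK_Z) != 0;
      Bool cf = (rflags & AMD64G_CC_MASK_C) != 0;
*/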
/* %RFLAGS thunk descriptors.  A four-word thunk is used to record
   details of the most recent flag-setting operation, so the flags can
   be computed later if needed.  It is possible to do this a little
   more efficiently using a 3-word thunk, but that makes it impossible
   to describe the flag data dependencies sufficiently accurately for
   Memcheck.  Hence 4 words are used, with minimal loss of efficiency.

   The four fields are:

      CC_OP, which describes the operation.

      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
      We want Memcheck to believe that the resulting flags are
      data-dependent on both CC_DEP1 and CC_DEP2, hence the
      name DEP.

      CC_NDEP.  This is a 3rd argument to the operation which is
      sometimes needed.  We arrange things so that Memcheck does
      not believe the resulting flags are data-dependent on CC_NDEP
      ("not dependent").

   To make Memcheck believe that (the definedness of) the encoded
   flags depends only on (the definedness of) CC_DEP1 and CC_DEP2,
   two things are done:

      (1) In the guest state layout info (amd64guest_layout), CC_OP and
          CC_NDEP are marked as always defined.

      (2) When passing the thunk components to an evaluation function
          (calculate_condition, calculate_eflags, calculate_eflags_c) the
          IRCallee's mcx_mask must be set so as to exclude from
          consideration all passed args except CC_DEP1 and CC_DEP2.

   Strictly speaking only (2) is necessary for correctness.  However,
   (1) helps efficiency in that since (2) means we never ask about the
   definedness of CC_OP or CC_NDEP, we may as well not even bother to
   track their definedness.

   When building the thunk, it is always necessary to write words into
   CC_DEP1 and CC_DEP2, even if those args are not used given the
   CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGIC1/2/4).
   This is important because otherwise Memcheck could give false
   positives as it does not understand the relationship between the
   CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the
   definedness of the stored flags always depends on both CC_DEP1 and
   CC_DEP2.

   However, it is only necessary to set CC_NDEP when the CC_OP value
   requires it, because Memcheck ignores CC_NDEP, and the evaluation
   functions do understand the CC_OP fields and will only examine
   CC_NDEP for suitable values of CC_OP.

   A summary of the field usages is:

   Operation          DEP1               DEP2               NDEP
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   add/sub/mul        first arg          second arg         unused

   adc/sbb            first arg          (second arg)
                                         XOR old_carry      old_carry

   and/or/xor         result             zero               unused

   inc/dec            result             zero               old_carry

   shl/shr/sar        result             subshifted-        unused
                                         value

   rol/ror            result             zero               old_flags

   copy               old_flags          zero               unused.

   Therefore Memcheck will believe the following:

   * add/sub/mul -- definedness of result flags depends on definedness
     of both args.

   * adc/sbb -- definedness of result flags depends on definedness of
     both args and definedness of the old C flag.  Because only two
     DEP fields are available, the old C flag is XOR'd into the second
     arg so that Memcheck sees the data dependency on it.  That means
     the NDEP field must contain a second copy of the old C flag
     so that the evaluation functions can correctly recover the second
     arg.

   * and/or/xor are straightforward -- definedness of result flags
     depends on definedness of result value.

   * inc/dec -- definedness of result flags depends only on
     definedness of result.  This isn't really true -- it also depends
     on the old C flag.  However, we don't want Memcheck to see that,
     and so the old C flag must be passed in NDEP and not in DEP2.
     It's inconceivable that a compiler would generate code that puts
     the C flag in an undefined state, then does an inc/dec, which
     leaves C unchanged, and then makes a conditional jump/move based
     on C.  So our fiction seems a good approximation.

   * shl/shr/sar -- straightforward, again, definedness of result
     flags depends on definedness of result value.  The subshifted
     value (value shifted one less) is also needed, but its
     definedness is the same as the definedness of the shifted value.

   * rol/ror -- these only set O and C, and leave A Z S P alone.
     However it seems prudent (as per inc/dec) to say the definedness
     of all resulting flags depends on the definedness of the result,
     hence the old flags must go in as NDEP and not DEP2.

   * rcl/rcr are too difficult to do in-line, and so are done by a
     helper function.  They are not part of this scheme.  The helper
     function takes the value to be rotated, the rotate amount and the
     old flags, and returns the new flags and the rotated value.
     Since the helper's mcx_mask does not have any set bits, Memcheck
     will lazily propagate undefinedness from any of the 3 args into
     both results (flags and actual value).
*/
enum {
    AMD64G_CC_OP_COPY=0,  /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
                          /* just copy DEP1 to output */

    AMD64G_CC_OP_ADDB,    /* 1 */
    AMD64G_CC_OP_ADDW,    /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_ADDL,    /* 3 */
    AMD64G_CC_OP_ADDQ,    /* 4 */

    AMD64G_CC_OP_SUBB,    /* 5 */
    AMD64G_CC_OP_SUBW,    /* 6 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SUBL,    /* 7 */
    AMD64G_CC_OP_SUBQ,    /* 8 */

    AMD64G_CC_OP_ADCB,    /* 9 */
    AMD64G_CC_OP_ADCW,    /* 10 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_ADCL,    /* 11 */
    AMD64G_CC_OP_ADCQ,    /* 12 */

    AMD64G_CC_OP_SBBB,    /* 13 */
    AMD64G_CC_OP_SBBW,    /* 14 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_SBBL,    /* 15 */
    AMD64G_CC_OP_SBBQ,    /* 16 */

    AMD64G_CC_OP_LOGICB,  /* 17 */
    AMD64G_CC_OP_LOGICW,  /* 18 DEP1 = result, DEP2 = 0, NDEP = unused */
    AMD64G_CC_OP_LOGICL,  /* 19 */
    AMD64G_CC_OP_LOGICQ,  /* 20 */

    AMD64G_CC_OP_INCB,    /* 21 */
    AMD64G_CC_OP_INCW,    /* 22 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_INCL,    /* 23 */
    AMD64G_CC_OP_INCQ,    /* 24 */

    AMD64G_CC_OP_DECB,    /* 25 */
    AMD64G_CC_OP_DECW,    /* 26 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_DECL,    /* 27 */
    AMD64G_CC_OP_DECQ,    /* 28 */

    AMD64G_CC_OP_SHLB,    /* 29 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHLW,    /* 30 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHLL,    /* 31 */
    AMD64G_CC_OP_SHLQ,    /* 32 */

    AMD64G_CC_OP_SHRB,    /* 33 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHRW,    /* 34 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHRL,    /* 35 */
    AMD64G_CC_OP_SHRQ,    /* 36 */

    AMD64G_CC_OP_ROLB,    /* 37 */
    AMD64G_CC_OP_ROLW,    /* 38 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_ROLL,    /* 39 */
    AMD64G_CC_OP_ROLQ,    /* 40 */

    AMD64G_CC_OP_RORB,    /* 41 */
    AMD64G_CC_OP_RORW,    /* 42 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_RORL,    /* 43 */
    AMD64G_CC_OP_RORQ,    /* 44 */

    AMD64G_CC_OP_UMULB,   /* 45 */
    AMD64G_CC_OP_UMULW,   /* 46 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_UMULL,   /* 47 */
    AMD64G_CC_OP_UMULQ,   /* 48 */

    AMD64G_CC_OP_SMULB,   /* 49 */
    AMD64G_CC_OP_SMULW,   /* 50 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SMULL,   /* 51 */
    AMD64G_CC_OP_SMULQ,   /* 52 */

    AMD64G_CC_OP_ANDN32,  /* 53 */
    AMD64G_CC_OP_ANDN64,  /* 54 DEP1 = res, DEP2 = 0, NDEP = unused */

    AMD64G_CC_OP_BLSI32,  /* 55 */
    AMD64G_CC_OP_BLSI64,  /* 56 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_BLSMSK32,/* 57 */
    AMD64G_CC_OP_BLSMSK64,/* 58 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_BLSR32,  /* 59 */
    AMD64G_CC_OP_BLSR64,  /* 60 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_ADCX32,  /* 61 DEP1 = argL, DEP2 = argR ^ oldCarry, .. */
    AMD64G_CC_OP_ADCX64,  /* 62 .. NDEP = old flags */

    AMD64G_CC_OP_ADOX32,  /* 63 DEP1 = argL, DEP2 = argR ^ oldOverflow, .. */
    AMD64G_CC_OP_ADOX64,  /* 64 .. NDEP = old flags */

    AMD64G_CC_OP_NUMBER
};
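/* Illustrative sketch (not part of the original header): how the thunk
   fields map onto an actual flags computation, for the ADDQ and ADCQ
   cases described above.  This mirrors the documented scheme, not the
   real evaluation code in guest_amd64_helpers.c.

      // ADDQ: DEP1 = argL, DEP2 = argR, NDEP unused.
      // The carry flag is set iff the 64-bit sum wraps.
      ULong argL = cc_dep1, argR = cc_dep2;
      ULong res  = argL + argR;
      ULong cf   = (res < argL) ? AMD64G_CC_MASK_C : 0;

      // ADCQ: DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry.
      // The evaluation code first recovers the real second argument:
      ULong oldC     = cc_ndep & AMD64G_CC_MASK_C;
      ULong realArgR = cc_dep2 ^ oldC;
*/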
typedef
   enum {
      AMD64CondO      = 0,  /* overflow           */
      AMD64CondNO     = 1,  /* no overflow        */

      AMD64CondB      = 2,  /* below              */
      AMD64CondNB     = 3,  /* not below          */

      AMD64CondZ      = 4,  /* zero               */
      AMD64CondNZ     = 5,  /* not zero           */

      AMD64CondBE     = 6,  /* below or equal     */
      AMD64CondNBE    = 7,  /* not below or equal */

      AMD64CondS      = 8,  /* negative           */
      AMD64CondNS     = 9,  /* not negative       */

      AMD64CondP      = 10, /* parity even        */
      AMD64CondNP     = 11, /* not parity even    */

      AMD64CondL      = 12, /* less               */
      AMD64CondNL     = 13, /* not less           */

      AMD64CondLE     = 14, /* less or equal      */
      AMD64CondNLE    = 15, /* not less or equal  */

      AMD64CondAlways = 16  /* HACK */
   }
   AMD64Condcode;
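/* Illustrative sketch (not part of the original header): evaluating a
   condition from a thunk, e.g. for a conditional jump.  It is assumed
   here that amd64g_calculate_condition returns nonzero iff the
   condition holds.

      ULong taken = amd64g_calculate_condition(AMD64CondZ, cc_op,
                                               cc_dep1, cc_dep2, cc_ndep);
      if (taken) { /* branch is taken */ }
*/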
#endif /* ndef __VEX_GUEST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                   guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/