1 //===-- X86InstrInfo.td - Main X86 Instruction Definition --*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the X86 instruction set, defining the instructions, and
10 // properties of the instructions which are needed for code generation, machine
11 // code emission, and analysis.
13 //===----------------------------------------------------------------------===//
15 //===----------------------------------------------------------------------===//
16 // X86 specific DAG Nodes.
// Type profiles for the X86-specific SelectionDAG nodes defined below.
// SDTypeProfile<NumResults, NumOperands, [constraints]> fixes the types of a
// node's results and operands; result/operand indices are shared across one
// numbering (results first, then operands).
// NOTE(review): several defs in this span are missing continuation lines in
// this listing (unbalanced '[' lists) — verify against the complete file.
19 def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
21 def SDTX86FCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisFP<1>,
// CMOV: result and both value operands share a type; operand 3 is the i8
// condition code and operand 4 the i32 EFLAGS input.
24 def SDTX86Cmov : SDTypeProfile<1, 4,
25 [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
26 SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
28 // Unary and binary operator instructions that set EFLAGS as a side-effect.
29 def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
31 SDTCisInt<0>, SDTCisVT<1, i32>]>;
33 def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
36 SDTCisInt<0>, SDTCisVT<1, i32>]>;
38 // SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
39 def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
45 // RES1, RES2, FLAGS = op LHS, RHS
46 def SDT2ResultBinaryArithWithFlags : SDTypeProfile<3, 2,
50 SDTCisInt<0>, SDTCisVT<1, i32>]>;
// Conditional branch: branch target (OtherVT), i8 condition code, i32 EFLAGS.
51 def SDTX86BrCond : SDTypeProfile<0, 3,
52 [SDTCisVT<0, OtherVT>,
53 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
55 def SDTX86SetCC : SDTypeProfile<1, 2,
57 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
58 def SDTX86SetCC_C : SDTypeProfile<1, 2,
60 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
// Type profiles for flag-transfer, RDRAND/PKRU, compare-and-swap, return,
// call-sequence, varargs, TLS, alloca, EH and ENQCMD/Key Locker nodes.
// NOTE(review): SDTX86wrpkru, SDTX86cas, SDTLock*ArithWithFlags,
// SDT_X86CallSeq*, SDT_X86VASTART_SAVE_XMM_REGS, SDT_X86VAARG and
// SDT_X86AESENCDECKL appear truncated in this listing — confirm their full
// constraint lists upstream.
62 def SDTX86sahf : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i8>]>;
64 def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;
66 def SDTX86rdpkru : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
67 def SDTX86wrpkru : SDTypeProfile<0, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
70 def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
72 def SDTX86cas8pair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
73 def SDTX86cas16pair : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i64>]>;
75 def SDTLockBinaryArithWithFlags : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
79 def SDTLockUnaryArithWithFlags : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
// Variadic (-1 operand count) profiles for return and call nodes.
82 def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
84 def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
86 def SDT_X86CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
89 def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
91 def SDT_X86NtBrind : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
93 def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
97 def SDT_X86VAARG : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
103 def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;
105 def SDTX86Void : SDTypeProfile<0, 0, []>;
107 def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
109 def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
111 def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
113 def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
115 def SDT_X86WIN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
117 def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
119 def SDT_X86PROBED_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
121 def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
123 def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
125 def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;
127 def SDT_X86ENQCMD : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
128 SDTCisPtrTy<1>, SDTCisSameAs<1, 2>]>;
130 def SDT_X86AESENCDECKL : SDTypeProfile<2, 2, [SDTCisVT<0, v2i64>,
// SDNode definitions binding each X86ISD opcode string to its type profile.
// The trailing SDNP* list declares DAG properties: SDNPHasChain (ordered with
// other chained ops), SDNPIn/OutGlue (glued to neighbors), SDNPMayLoad/
// SDNPMayStore/SDNPMemOperand (memory behavior), SDNPSideEffect.
// NOTE(review): X86MFence and X86brcond are missing their continuation lines
// in this listing — verify their property lists upstream.
135 def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
136 [SDNPHasChain,SDNPSideEffect]>;
137 def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
141 def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
142 def X86bsr : SDNode<"X86ISD::BSR", SDTUnaryArithWithFlags>;
143 def X86fshl : SDNode<"X86ISD::FSHL", SDTIntShiftDOp>;
144 def X86fshr : SDNode<"X86ISD::FSHR", SDTIntShiftDOp>;
146 def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>;
147 def X86fcmp : SDNode<"X86ISD::FCMP", SDTX86FCmp>;
// Strict FP compares carry a chain so they are not reordered/speculated.
148 def X86strict_fcmp : SDNode<"X86ISD::STRICT_FCMP", SDTX86FCmp, [SDNPHasChain]>;
149 def X86strict_fcmps : SDNode<"X86ISD::STRICT_FCMPS", SDTX86FCmp, [SDNPHasChain]>;
150 def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;
152 def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
153 def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
155 def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
156 def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;
158 def X86rdrand : SDNode<"X86ISD::RDRAND", SDTX86rdrand,
159 [SDNPHasChain, SDNPSideEffect]>;
161 def X86rdseed : SDNode<"X86ISD::RDSEED", SDTX86rdrand,
162 [SDNPHasChain, SDNPSideEffect]>;
164 def X86rdpkru : SDNode<"X86ISD::RDPKRU", SDTX86rdpkru,
165 [SDNPHasChain, SDNPSideEffect]>;
166 def X86wrpkru : SDNode<"X86ISD::WRPKRU", SDTX86wrpkru,
167 [SDNPHasChain, SDNPSideEffect]>;
// Atomic compare-and-swap nodes: chained, glued, and touch memory.
169 def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
170 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
171 SDNPMayLoad, SDNPMemOperand]>;
172 def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86cas8pair,
173 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
174 SDNPMayLoad, SDNPMemOperand]>;
175 def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86cas16pair,
176 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
177 SDNPMayLoad, SDNPMemOperand]>;
179 def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
180 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
181 def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
182 [SDNPHasChain, SDNPOptInGlue]>;
184 def X86vastart_save_xmm_regs :
185 SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
186 SDT_X86VASTART_SAVE_XMM_REGS,
187 [SDNPHasChain, SDNPVariadic]>;
// Varargs, call-sequence, call, string-op, wrapper, TLS and EH nodes.
// NOTE(review): the 'def NAME :' header lines for the VAARG_64/VAARG_X32 and
// CALLSEQ_END nodes are missing from this listing, as are several property-
// list continuations (X86call, X86call_rvmarker, X86NoTrackCall,
// X86NoTrackBrind, X86rep_movs, X86RecoverFrameAlloc, X86ehret,
// X86eh_sjlj_setjmp) — verify against the complete file.
189 SDNode<"X86ISD::VAARG_64", SDT_X86VAARG,
190 [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
193 SDNode<"X86ISD::VAARG_X32", SDT_X86VAARG,
194 [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
196 def X86callseq_start :
197 SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
198 [SDNPHasChain, SDNPOutGlue]>;
200 SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
201 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
203 def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
204 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
207 def X86call_rvmarker : SDNode<"X86ISD::CALL_RVMARKER", SDT_X86Call,
208 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
// NT_CALL/NT_BRIND: calls/indirect branches that suppress CET tracking.
212 def X86NoTrackCall : SDNode<"X86ISD::NT_CALL", SDT_X86Call,
213 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
215 def X86NoTrackBrind : SDNode<"X86ISD::NT_BRIND", SDT_X86NtBrind,
218 def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
219 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore]>;
220 def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
221 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
// Wrapper/WrapperRIP wrap global addresses etc.; RIP form is RIP-relative.
224 def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
225 def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
227 def X86RecoverFrameAlloc : SDNode<"ISD::LOCAL_RECOVER",
228 SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
231 def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
232 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
234 def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
235 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
237 def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
240 def X86eh_sjlj_setjmp : SDNode<"X86ISD::EH_SJLJ_SETJMP",
241 SDTypeProfile<1, 1, [SDTCisInt<0>,
243 [SDNPHasChain, SDNPSideEffect]>;
244 def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
245 SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
246 [SDNPHasChain, SDNPSideEffect]>;
247 def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
248 SDTypeProfile<0, 0, []>,
249 [SDNPHasChain, SDNPSideEffect]>;
251 def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
252 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
// EFLAGS-producing arithmetic nodes (ADD/SUB/MUL/ADC/SBB/OR/XOR/AND),
// atomic LOCK-prefixed RMW nodes (L*), and BMI/TBM bit-manipulation nodes.
// NOTE(review): several defs here appear to be missing a trailing property
// list or its continuation line in this listing — verify upstream.
254 def X86add_flag : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags,
256 def X86sub_flag : SDNode<"X86ISD::SUB", SDTBinaryArithWithFlags>;
257 def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
// UMUL produces two results (lo/hi) plus flags, hence the 2-result profile.
259 def X86umul_flag : SDNode<"X86ISD::UMUL", SDT2ResultBinaryArithWithFlags,
// ADC/SBB additionally consume an incoming EFLAGS operand (InOut profile).
261 def X86adc_flag : SDNode<"X86ISD::ADC", SDTBinaryArithWithFlagsInOut>;
262 def X86sbb_flag : SDNode<"X86ISD::SBB", SDTBinaryArithWithFlagsInOut>;
264 def X86or_flag : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags,
266 def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
268 def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
271 def X86lock_add : SDNode<"X86ISD::LADD", SDTLockBinaryArithWithFlags,
272 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
274 def X86lock_sub : SDNode<"X86ISD::LSUB", SDTLockBinaryArithWithFlags,
275 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
277 def X86lock_or : SDNode<"X86ISD::LOR", SDTLockBinaryArithWithFlags,
278 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
280 def X86lock_xor : SDNode<"X86ISD::LXOR", SDTLockBinaryArithWithFlags,
281 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
283 def X86lock_and : SDNode<"X86ISD::LAND", SDTLockBinaryArithWithFlags,
284 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
287 def X86bextr : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;
288 def X86bextri : SDNode<"X86ISD::BEXTRI", SDTIntBinOp>;
290 def X86bzhi : SDNode<"X86ISD::BZHI", SDTIntBinOp>;
292 def X86pdep : SDNode<"X86ISD::PDEP", SDTIntBinOp>;
293 def X86pext : SDNode<"X86ISD::PEXT", SDTIntBinOp>;
// Stack allocation, TLS-call, LWP, user-wait (UMWAIT/TPAUSE), ENQCMD,
// UINTR (TESTUI) and Key Locker AES nodes.
// NOTE(review): X86SegAlloca, X86ProbedAlloca and the four aes*kl defs are
// missing continuation lines in this listing — verify upstream.
295 def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
297 def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDT_X86WIN_ALLOCA,
298 [SDNPHasChain, SDNPOutGlue]>;
300 def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
303 def X86ProbedAlloca : SDNode<"X86ISD::PROBED_ALLOCA", SDT_X86PROBED_ALLOCA,
306 def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
307 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
// LWPINS/UMWAIT/TPAUSE share the shape: i32 result, int + two i32 operands.
309 def X86lwpins : SDNode<"X86ISD::LWPINS",
310 SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
311 SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
312 [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPSideEffect]>;
314 def X86umwait : SDNode<"X86ISD::UMWAIT",
315 SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
316 SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
317 [SDNPHasChain, SDNPSideEffect]>;
319 def X86tpause : SDNode<"X86ISD::TPAUSE",
320 SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
321 SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
322 [SDNPHasChain, SDNPSideEffect]>;
324 def X86enqcmd : SDNode<"X86ISD::ENQCMD", SDT_X86ENQCMD,
325 [SDNPHasChain, SDNPSideEffect]>;
326 def X86enqcmds : SDNode<"X86ISD::ENQCMDS", SDT_X86ENQCMD,
327 [SDNPHasChain, SDNPSideEffect]>;
328 def X86testui : SDNode<"X86ISD::TESTUI",
329 SDTypeProfile<1, 0, [SDTCisVT<0, i32>]>,
330 [SDNPHasChain, SDNPSideEffect]>;
332 def X86aesenc128kl : SDNode<"X86ISD::AESENC128KL", SDT_X86AESENCDECKL,
333 [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
335 def X86aesdec128kl : SDNode<"X86ISD::AESDEC128KL", SDT_X86AESENCDECKL,
336 [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
338 def X86aesenc256kl : SDNode<"X86ISD::AESENC256KL", SDT_X86AESENCDECKL,
339 [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
341 def X86aesdec256kl : SDNode<"X86ISD::AESDEC256KL", SDT_X86AESENCDECKL,
342 [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
345 //===----------------------------------------------------------------------===//
346 // X86 Operand Definitions.
349 // A version of ptr_rc which excludes SP, ESP, and RSP. This is used for
350 // the index operand of an address, to conform to x86 encoding restrictions.
351 def ptr_rc_nosp : PointerLikeRegClass<1>;
353 // *mem - Operand definitions for the funky X86 addressing mode operands.
// NOTE(review): the body of X86MemAsmOperand and of X86AbsMemAsmOperand is
// partially missing from this listing — verify upstream.
355 def X86MemAsmOperand : AsmOperandClass {
// AsmOperandClass records drive assembly parsing: Name is matched by the
// parser, RenderMethod converts the parsed operand to MCInst operands, and
// SuperClasses establishes the match-preference lattice.
358 let RenderMethod = "addMemOperands", SuperClasses = [X86MemAsmOperand] in {
359 def X86Mem8AsmOperand : AsmOperandClass { let Name = "Mem8"; }
360 def X86Mem16AsmOperand : AsmOperandClass { let Name = "Mem16"; }
361 def X86Mem32AsmOperand : AsmOperandClass { let Name = "Mem32"; }
362 def X86Mem64AsmOperand : AsmOperandClass { let Name = "Mem64"; }
363 def X86Mem80AsmOperand : AsmOperandClass { let Name = "Mem80"; }
364 def X86Mem128AsmOperand : AsmOperandClass { let Name = "Mem128"; }
365 def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
366 def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
367 // Gather mem operands
// Names encode MemWIDTH_RC(index register class); the X suffix denotes the
// EVEX/AVX-512 extended register classes.
368 def X86Mem64_RC128Operand : AsmOperandClass { let Name = "Mem64_RC128"; }
369 def X86Mem128_RC128Operand : AsmOperandClass { let Name = "Mem128_RC128"; }
370 def X86Mem256_RC128Operand : AsmOperandClass { let Name = "Mem256_RC128"; }
371 def X86Mem128_RC256Operand : AsmOperandClass { let Name = "Mem128_RC256"; }
372 def X86Mem256_RC256Operand : AsmOperandClass { let Name = "Mem256_RC256"; }
374 def X86Mem64_RC128XOperand : AsmOperandClass { let Name = "Mem64_RC128X"; }
375 def X86Mem128_RC128XOperand : AsmOperandClass { let Name = "Mem128_RC128X"; }
376 def X86Mem256_RC128XOperand : AsmOperandClass { let Name = "Mem256_RC128X"; }
377 def X86Mem128_RC256XOperand : AsmOperandClass { let Name = "Mem128_RC256X"; }
378 def X86Mem256_RC256XOperand : AsmOperandClass { let Name = "Mem256_RC256X"; }
379 def X86Mem512_RC256XOperand : AsmOperandClass { let Name = "Mem512_RC256X"; }
380 def X86Mem256_RC512Operand : AsmOperandClass { let Name = "Mem256_RC512"; }
381 def X86Mem512_RC512Operand : AsmOperandClass { let Name = "Mem512_RC512"; }
383 def X86SibMemOperand : AsmOperandClass { let Name = "SibMem"; }
386 def X86AbsMemAsmOperand : AsmOperandClass {
388 let SuperClasses = [X86MemAsmOperand];
// Base class for x86 memory operands. MIOperandInfo lists the five MI
// sub-operands of an x86 address: base reg, scale imm, index reg (no SP),
// displacement imm, and segment register.
391 class X86MemOperand<string printMethod,
392 AsmOperandClass parserMatchClass = X86MemAsmOperand> : Operand<iPTR> {
393 let PrintMethod = printMethod;
394 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
395 let ParserMatchClass = parserMatchClass;
396 let OperandType = "OPERAND_MEMORY";
399 // Gather mem operands
// Same 5-part address, but the index register comes from a vector register
// class RC (VR128/VR256/... for gather/scatter addressing).
400 class X86VMemOperand<RegisterClass RC, string printMethod,
401 AsmOperandClass parserMatchClass>
402 : X86MemOperand<printMethod, parserMatchClass> {
403 let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, SEGMENT_REG);
// Concrete memory operand defs, one per access width; the print method name
// reflects the Intel-syntax size keyword (byte/word/dword/.../zmmword ptr).
406 def anymem : X86MemOperand<"printMemReference">;
// Matches either a strict or a non-strict X86 FP compare node.
407 def X86any_fcmp : PatFrags<(ops node:$lhs, node:$rhs),
408 [(X86strict_fcmp node:$lhs, node:$rhs),
409 (X86fcmp node:$lhs, node:$rhs)]>;
411 // FIXME: Right now we allow any size during parsing, but we might want to
412 // restrict to only unsized memory.
413 def opaquemem : X86MemOperand<"printMemReference">;
415 def sibmem: X86MemOperand<"printMemReference", X86SibMemOperand>;
417 def i8mem : X86MemOperand<"printbytemem", X86Mem8AsmOperand>;
418 def i16mem : X86MemOperand<"printwordmem", X86Mem16AsmOperand>;
419 def i32mem : X86MemOperand<"printdwordmem", X86Mem32AsmOperand>;
420 def i64mem : X86MemOperand<"printqwordmem", X86Mem64AsmOperand>;
421 def i128mem : X86MemOperand<"printxmmwordmem", X86Mem128AsmOperand>;
422 def i256mem : X86MemOperand<"printymmwordmem", X86Mem256AsmOperand>;
423 def i512mem : X86MemOperand<"printzmmwordmem", X86Mem512AsmOperand>;
424 def f16mem : X86MemOperand<"printwordmem", X86Mem16AsmOperand>;
425 def f32mem : X86MemOperand<"printdwordmem", X86Mem32AsmOperand>;
426 def f64mem : X86MemOperand<"printqwordmem", X86Mem64AsmOperand>;
427 def f80mem : X86MemOperand<"printtbytemem", X86Mem80AsmOperand>;
428 def f128mem : X86MemOperand<"printxmmwordmem", X86Mem128AsmOperand>;
429 def f256mem : X86MemOperand<"printymmwordmem", X86Mem256AsmOperand>;
430 def f512mem : X86MemOperand<"printzmmwordmem", X86Mem512AsmOperand>;
432 // Gather mem operands
// vWIDTHmem naming: v<memory width>, index from VR128/VR256 (x suffix and
// VR128X/VR256X/VR512 for the EVEX-extended variants).
433 def vx64mem : X86VMemOperand<VR128, "printqwordmem", X86Mem64_RC128Operand>;
434 def vx128mem : X86VMemOperand<VR128, "printxmmwordmem", X86Mem128_RC128Operand>;
435 def vx256mem : X86VMemOperand<VR128, "printymmwordmem", X86Mem256_RC128Operand>;
436 def vy128mem : X86VMemOperand<VR256, "printxmmwordmem", X86Mem128_RC256Operand>;
437 def vy256mem : X86VMemOperand<VR256, "printymmwordmem", X86Mem256_RC256Operand>;
439 def vx64xmem : X86VMemOperand<VR128X, "printqwordmem", X86Mem64_RC128XOperand>;
440 def vx128xmem : X86VMemOperand<VR128X, "printxmmwordmem", X86Mem128_RC128XOperand>;
441 def vx256xmem : X86VMemOperand<VR128X, "printymmwordmem", X86Mem256_RC128XOperand>;
442 def vy128xmem : X86VMemOperand<VR256X, "printxmmwordmem", X86Mem128_RC256XOperand>;
443 def vy256xmem : X86VMemOperand<VR256X, "printymmwordmem", X86Mem256_RC256XOperand>;
444 def vy512xmem : X86VMemOperand<VR256X, "printzmmwordmem", X86Mem512_RC256XOperand>;
445 def vz256mem : X86VMemOperand<VR512, "printymmwordmem", X86Mem256_RC512Operand>;
446 def vz512mem : X86VMemOperand<VR512, "printzmmwordmem", X86Mem512_RC512Operand>;
448 // A version of i8mem for use on x86-64 and x32 that uses a NOREX GPR instead
449 // of a plain GPR, so that it doesn't potentially require a REX prefix.
450 def ptr_rc_norex : PointerLikeRegClass<2>;
451 def ptr_rc_norex_nosp : PointerLikeRegClass<3>;
453 def i8mem_NOREX : Operand<iPTR> {
454 let PrintMethod = "printbytemem";
455 let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm,
457 let ParserMatchClass = X86Mem8AsmOperand;
458 let OperandType = "OPERAND_MEMORY";
461 // GPRs available for tailcall.
462 // It represents GR32_TC, GR64_TC or GR64_TCW64.
463 def ptr_rc_tailcall : PointerLikeRegClass<4>;
465 // Special i32mem for addresses of load folding tail calls. These are not
466 // allowed to use callee-saved registers since they must be scheduled
467 // after callee-saved register are popped.
468 def i32mem_TC : Operand<i32> {
469 let PrintMethod = "printdwordmem";
470 let MIOperandInfo = (ops ptr_rc_tailcall, i8imm, ptr_rc_tailcall,
471 i32imm, SEGMENT_REG);
472 let ParserMatchClass = X86Mem32AsmOperand;
473 let OperandType = "OPERAND_MEMORY";
476 // Special i64mem for addresses of load folding tail calls. These are not
477 // allowed to use callee-saved registers since they must be scheduled
478 // after callee-saved register are popped.
479 def i64mem_TC : Operand<i64> {
480 let PrintMethod = "printqwordmem";
481 let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
482 ptr_rc_tailcall, i32imm, SEGMENT_REG);
483 let ParserMatchClass = X86Mem64AsmOperand;
484 let OperandType = "OPERAND_MEMORY";
487 // Special parser to detect 16-bit mode to select 16-bit displacement.
488 def X86AbsMem16AsmOperand : AsmOperandClass {
489 let Name = "AbsMem16";
490 let RenderMethod = "addAbsMemOperands";
491 let SuperClasses = [X86AbsMemAsmOperand];
494 // Branch targets print as pc-relative values.
495 class BranchTargetOperand<ValueType ty> : Operand<ty> {
496 let OperandType = "OPERAND_PCREL";
497 let PrintMethod = "printPCRelImm";
498 let ParserMatchClass = X86AbsMemAsmOperand;
501 def i32imm_brtarget : BranchTargetOperand<i32>;
502 def i16imm_brtarget : BranchTargetOperand<i16>;
504 // 64-bits but only 32 bits are significant, and those bits are treated as being
// (continuation of the comment above appears truncated in this listing)
506 def i64i32imm_brtarget : BranchTargetOperand<i64>;
508 def brtarget : BranchTargetOperand<OtherVT>;
509 def brtarget8 : BranchTargetOperand<OtherVT>;
// 16-bit branch target overrides the parser class to accept 16-bit
// absolute-memory syntax.
510 def brtarget16 : BranchTargetOperand<OtherVT> {
511 let ParserMatchClass = X86AbsMem16AsmOperand;
513 def brtarget32 : BranchTargetOperand<OtherVT>;
// Asm operand classes for the implicit source/destination index operands of
// the x86 string instructions (MOVS/STOS/...): (RSI)/(RDI)-style operands,
// one class per access width, each a subclass of the plain MemN class.
515 let RenderMethod = "addSrcIdxOperands" in {
516 def X86SrcIdx8Operand : AsmOperandClass {
517 let Name = "SrcIdx8";
518 let SuperClasses = [X86Mem8AsmOperand];
520 def X86SrcIdx16Operand : AsmOperandClass {
521 let Name = "SrcIdx16";
522 let SuperClasses = [X86Mem16AsmOperand];
524 def X86SrcIdx32Operand : AsmOperandClass {
525 let Name = "SrcIdx32";
526 let SuperClasses = [X86Mem32AsmOperand];
528 def X86SrcIdx64Operand : AsmOperandClass {
529 let Name = "SrcIdx64";
530 let SuperClasses = [X86Mem64AsmOperand];
532 } // RenderMethod = "addSrcIdxOperands"
534 let RenderMethod = "addDstIdxOperands" in {
535 def X86DstIdx8Operand : AsmOperandClass {
536 let Name = "DstIdx8";
537 let SuperClasses = [X86Mem8AsmOperand];
539 def X86DstIdx16Operand : AsmOperandClass {
540 let Name = "DstIdx16";
541 let SuperClasses = [X86Mem16AsmOperand];
543 def X86DstIdx32Operand : AsmOperandClass {
544 let Name = "DstIdx32";
545 let SuperClasses = [X86Mem32AsmOperand];
547 def X86DstIdx64Operand : AsmOperandClass {
548 let Name = "DstIdx64";
549 let SuperClasses = [X86Mem64AsmOperand];
551 } // RenderMethod = "addDstIdxOperands"
// Asm operand classes for absolute memory-offset (moffs) forms of MOV.
// Naming is MemOffs<address size>_<operand size>; each subclasses the plain
// MemN class of its operand size.
553 let RenderMethod = "addMemOffsOperands" in {
554 def X86MemOffs16_8AsmOperand : AsmOperandClass {
555 let Name = "MemOffs16_8";
556 let SuperClasses = [X86Mem8AsmOperand];
558 def X86MemOffs16_16AsmOperand : AsmOperandClass {
559 let Name = "MemOffs16_16";
560 let SuperClasses = [X86Mem16AsmOperand];
562 def X86MemOffs16_32AsmOperand : AsmOperandClass {
563 let Name = "MemOffs16_32";
564 let SuperClasses = [X86Mem32AsmOperand];
566 def X86MemOffs32_8AsmOperand : AsmOperandClass {
567 let Name = "MemOffs32_8";
568 let SuperClasses = [X86Mem8AsmOperand];
570 def X86MemOffs32_16AsmOperand : AsmOperandClass {
571 let Name = "MemOffs32_16";
572 let SuperClasses = [X86Mem16AsmOperand];
574 def X86MemOffs32_32AsmOperand : AsmOperandClass {
575 let Name = "MemOffs32_32";
576 let SuperClasses = [X86Mem32AsmOperand];
578 def X86MemOffs32_64AsmOperand : AsmOperandClass {
579 let Name = "MemOffs32_64";
580 let SuperClasses = [X86Mem64AsmOperand];
582 def X86MemOffs64_8AsmOperand : AsmOperandClass {
583 let Name = "MemOffs64_8";
584 let SuperClasses = [X86Mem8AsmOperand];
586 def X86MemOffs64_16AsmOperand : AsmOperandClass {
587 let Name = "MemOffs64_16";
588 let SuperClasses = [X86Mem16AsmOperand];
590 def X86MemOffs64_32AsmOperand : AsmOperandClass {
591 let Name = "MemOffs64_32";
592 let SuperClasses = [X86Mem32AsmOperand];
594 def X86MemOffs64_64AsmOperand : AsmOperandClass {
595 let Name = "MemOffs64_64";
596 let SuperClasses = [X86Mem64AsmOperand];
598 } // RenderMethod = "addMemOffsOperands"
// Operand classes for string-op index operands: a source index is
// (pointer reg, segment reg); a destination index is the pointer reg only
// (its segment is implicitly ES).
600 class X86SrcIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
601 : X86MemOperand<printMethod, parserMatchClass> {
602 let MIOperandInfo = (ops ptr_rc, SEGMENT_REG);
605 class X86DstIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
606 : X86MemOperand<printMethod, parserMatchClass> {
607 let MIOperandInfo = (ops ptr_rc);
610 def srcidx8 : X86SrcIdxOperand<"printSrcIdx8", X86SrcIdx8Operand>;
611 def srcidx16 : X86SrcIdxOperand<"printSrcIdx16", X86SrcIdx16Operand>;
612 def srcidx32 : X86SrcIdxOperand<"printSrcIdx32", X86SrcIdx32Operand>;
613 def srcidx64 : X86SrcIdxOperand<"printSrcIdx64", X86SrcIdx64Operand>;
614 def dstidx8 : X86DstIdxOperand<"printDstIdx8", X86DstIdx8Operand>;
615 def dstidx16 : X86DstIdxOperand<"printDstIdx16", X86DstIdx16Operand>;
616 def dstidx32 : X86DstIdxOperand<"printDstIdx32", X86DstIdx32Operand>;
617 def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>;
// moffs operand: an immediate absolute offset (sized by address mode) plus a
// segment register — no base/index registers.
619 class X86MemOffsOperand<Operand immOperand, string printMethod,
620 AsmOperandClass parserMatchClass>
621 : X86MemOperand<printMethod, parserMatchClass> {
622 let MIOperandInfo = (ops immOperand, SEGMENT_REG);
625 def offset16_8 : X86MemOffsOperand<i16imm, "printMemOffs8",
626 X86MemOffs16_8AsmOperand>;
627 def offset16_16 : X86MemOffsOperand<i16imm, "printMemOffs16",
628 X86MemOffs16_16AsmOperand>;
629 def offset16_32 : X86MemOffsOperand<i16imm, "printMemOffs32",
630 X86MemOffs16_32AsmOperand>;
631 def offset32_8 : X86MemOffsOperand<i32imm, "printMemOffs8",
632 X86MemOffs32_8AsmOperand>;
633 def offset32_16 : X86MemOffsOperand<i32imm, "printMemOffs16",
634 X86MemOffs32_16AsmOperand>;
635 def offset32_32 : X86MemOffsOperand<i32imm, "printMemOffs32",
636 X86MemOffs32_32AsmOperand>;
637 def offset32_64 : X86MemOffsOperand<i32imm, "printMemOffs64",
638 X86MemOffs32_64AsmOperand>;
639 def offset64_8 : X86MemOffsOperand<i64imm, "printMemOffs8",
640 X86MemOffs64_8AsmOperand>;
641 def offset64_16 : X86MemOffsOperand<i64imm, "printMemOffs16",
642 X86MemOffs64_16AsmOperand>;
643 def offset64_32 : X86MemOffsOperand<i64imm, "printMemOffs32",
644 X86MemOffs64_32AsmOperand>;
645 def offset64_64 : X86MemOffsOperand<i64imm, "printMemOffs64",
646 X86MemOffs64_64AsmOperand>;
// Condition-code operand used by the CCCode-carrying instruction forms
// (SETCC/CMOV/Jcc); printed symbolically via printCondCode.
648 def ccode : Operand<i8> {
649 let PrintMethod = "printCondCode";
650 let OperandNamespace = "X86";
651 let OperandType = "OPERAND_COND_CODE";
// Common base for the sign-extended immediate asm operand classes below.
654 class ImmSExtAsmOperandClass : AsmOperandClass {
655 let SuperClasses = [ImmAsmOperand];
656 let RenderMethod = "addImmOperands";
// Register operands that accept larger register spellings in assembly while
// the MI operand stays in the smaller class.
659 def X86GR32orGR64AsmOperand : AsmOperandClass {
660 let Name = "GR32orGR64";
662 def GR32orGR64 : RegisterOperand<GR32> {
663 let ParserMatchClass = X86GR32orGR64AsmOperand;
666 def X86GR16orGR32orGR64AsmOperand : AsmOperandClass {
667 let Name = "GR16orGR32orGR64";
669 def GR16orGR32orGR64 : RegisterOperand<GR16> {
670 let ParserMatchClass = X86GR16orGR32orGR64AsmOperand;
// AVX-512 static rounding-control operand ({rn-sae} etc.).
673 def AVX512RCOperand : AsmOperandClass {
674 let Name = "AVX512RC";
676 def AVX512RC : Operand<i32> {
677 let PrintMethod = "printRoundingControl";
678 let OperandNamespace = "X86";
679 let OperandType = "OPERAND_ROUNDING_CONTROL";
680 let ParserMatchClass = AVX512RCOperand;
683 // Sign-extended immediate classes. We don't need to define the full lattice
684 // here because there is no instruction with an ambiguity between ImmSExti64i32
// (rest of this comment appears truncated in this listing)
687 // The strange ranges come from the fact that the assembler always works with
688 // 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
689 // (which will be a -1ULL), and "0xFF" (-1 in 16-bits).
692 // [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
693 def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
694 let Name = "ImmSExti64i32";
697 // [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
698 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
699 def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
700 let Name = "ImmSExti16i8";
701 let SuperClasses = [ImmSExti64i32AsmOperand];
704 // [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
705 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
706 def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
707 let Name = "ImmSExti32i8";
711 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
// ImmSExti64i8 sits below all three wider classes in the match lattice, so
// the parser prefers the 8-bit-immediate encodings when the value fits.
712 def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
713 let Name = "ImmSExti64i8";
714 let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
715 ImmSExti64i32AsmOperand];
718 // 4-bit immediate used by some XOP instructions
720 def ImmUnsignedi4AsmOperand : AsmOperandClass {
721 let Name = "ImmUnsignedi4";
722 let RenderMethod = "addImmOperands";
723 let DiagnosticType = "InvalidImmUnsignedi4";
726 // Unsigned immediate used by SSE/AVX instructions
728 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
729 def ImmUnsignedi8AsmOperand : AsmOperandClass {
730 let Name = "ImmUnsignedi8";
731 let RenderMethod = "addImmOperands";
734 // A couple of more descriptive operand definitions.
735 // 16-bits but only 8 bits are significant.
736 def i16i8imm : Operand<i16> {
737 let ParserMatchClass = ImmSExti16i8AsmOperand;
738 let OperandType = "OPERAND_IMMEDIATE";
740 // 32-bits but only 8 bits are significant.
741 def i32i8imm : Operand<i32> {
742 let ParserMatchClass = ImmSExti32i8AsmOperand;
743 let OperandType = "OPERAND_IMMEDIATE";
746 // 64-bits but only 32 bits are significant.
747 def i64i32imm : Operand<i64> {
748 let ParserMatchClass = ImmSExti64i32AsmOperand;
749 let OperandType = "OPERAND_IMMEDIATE";
752 // 64-bits but only 8 bits are significant.
753 def i64i8imm : Operand<i64> {
754 let ParserMatchClass = ImmSExti64i8AsmOperand;
755 let OperandType = "OPERAND_IMMEDIATE";
758 // Unsigned 4-bit immediate used by some XOP instructions.
759 def u4imm : Operand<i8> {
760 let PrintMethod = "printU8Imm";
761 let ParserMatchClass = ImmUnsignedi4AsmOperand;
762 let OperandType = "OPERAND_IMMEDIATE";
765 // Unsigned 8-bit immediate used by SSE/AVX instructions.
766 def u8imm : Operand<i8> {
767 let PrintMethod = "printU8Imm";
768 let ParserMatchClass = ImmUnsignedi8AsmOperand;
769 let OperandType = "OPERAND_IMMEDIATE";
772 // 16-bit immediate but only 8-bits are significant and they are unsigned.
773 // Used by BT instructions.
774 def i16u8imm : Operand<i16> {
775 let PrintMethod = "printU8Imm";
776 let ParserMatchClass = ImmUnsignedi8AsmOperand;
777 let OperandType = "OPERAND_IMMEDIATE";
780 // 32-bit immediate but only 8-bits are significant and they are unsigned.
781 // Used by some SSE/AVX instructions that use intrinsics.
782 def i32u8imm : Operand<i32> {
783 let PrintMethod = "printU8Imm";
784 let ParserMatchClass = ImmUnsignedi8AsmOperand;
785 let OperandType = "OPERAND_IMMEDIATE";
788 // 64-bit immediate but only 8-bits are significant and they are unsigned.
789 // Used by BT instructions.
790 def i64u8imm : Operand<i64> {
791 let PrintMethod = "printU8Imm";
792 let ParserMatchClass = ImmUnsignedi8AsmOperand;
793 let OperandType = "OPERAND_IMMEDIATE";
// LEA memory operand with a 32-bit result but 64-bit address registers
// (GR64 base, GR64_NOSP index) — the 64-bit address computed, truncated.
796 def lea64_32mem : Operand<i32> {
797 let PrintMethod = "printMemReference";
798 let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
799 let ParserMatchClass = X86MemAsmOperand;
802 // Memory operands that use 64-bit pointers in both ILP32 and LP64.
803 def lea64mem : Operand<i64> {
804 let PrintMethod = "printMemReference";
805 let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
806 let ParserMatchClass = X86MemAsmOperand;
// AVX-512 mask-register pair operands (used e.g. by VP2INTERSECT, which
// writes a k-register pair); one class/operand per mask width.
809 let RenderMethod = "addMaskPairOperands" in {
810 def VK1PairAsmOperand : AsmOperandClass { let Name = "VK1Pair"; }
811 def VK2PairAsmOperand : AsmOperandClass { let Name = "VK2Pair"; }
812 def VK4PairAsmOperand : AsmOperandClass { let Name = "VK4Pair"; }
813 def VK8PairAsmOperand : AsmOperandClass { let Name = "VK8Pair"; }
814 def VK16PairAsmOperand : AsmOperandClass { let Name = "VK16Pair"; }
817 def VK1Pair : RegisterOperand<VK1PAIR, "printVKPair"> {
818 let ParserMatchClass = VK1PairAsmOperand;
821 def VK2Pair : RegisterOperand<VK2PAIR, "printVKPair"> {
822 let ParserMatchClass = VK2PairAsmOperand;
825 def VK4Pair : RegisterOperand<VK4PAIR, "printVKPair"> {
826 let ParserMatchClass = VK4PairAsmOperand;
829 def VK8Pair : RegisterOperand<VK8PAIR, "printVKPair"> {
830 let ParserMatchClass = VK8PairAsmOperand;
833 def VK16Pair : RegisterOperand<VK16PAIR, "printVKPair"> {
834 let ParserMatchClass = VK16PairAsmOperand;
837 //===----------------------------------------------------------------------===//
838 // X86 Complex Pattern Definitions.
841 // Define X86-specific addressing mode.
// ComplexPattern<result type, #operands, selector fn, [root nodes], [props]>:
// the C++ selector in X86ISelDAGToDAG decomposes the address into the 5
// sub-operands (base, scale, index, disp, segment).
842 def addr : ComplexPattern<iPTR, 5, "selectAddr", [], [SDNPWantParent]>;
843 def lea32addr : ComplexPattern<i32, 5, "selectLEAAddr",
844 [add, sub, mul, X86mul_imm, shl, or, frameindex],
846 // In 64-bit mode 32-bit LEAs can use RIP-relative addressing.
847 def lea64_32addr : ComplexPattern<i32, 5, "selectLEA64_32Addr",
848 [add, sub, mul, X86mul_imm, shl, or,
849 frameindex, X86WrapperRIP],
852 def tls32addr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
853 [tglobaltlsaddr], []>;
855 def tls32baseaddr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
856 [tglobaltlsaddr], []>;
858 def lea64addr : ComplexPattern<i64, 5, "selectLEAAddr",
859 [add, sub, mul, X86mul_imm, shl, or, frameindex,
862 def tls64addr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
863 [tglobaltlsaddr], []>;
865 def tls64baseaddr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
866 [tglobaltlsaddr], []>;
868 def vectoraddr : ComplexPattern<iPTR, 5, "selectVectorAddr", [],[SDNPWantParent]>;
870 // A relocatable immediate is an operand that can be relocated by the linker to
871 // an immediate, such as a regular symbol in non-PIC code.
872 def relocImm : ComplexPattern<iAny, 1, "selectRelocImm",
873 [X86Wrapper], [], 0>;
875 //===----------------------------------------------------------------------===//
876 // X86 Instruction Predicate Definitions.
877 def TruePredicate : Predicate<"true">;
879 def HasCMov : Predicate<"Subtarget->hasCMov()">;
880 def NoCMov : Predicate<"!Subtarget->hasCMov()">;
882 def HasMMX : Predicate<"Subtarget->hasMMX()">;
883 def Has3DNow : Predicate<"Subtarget->has3DNow()">;
884 def Has3DNowA : Predicate<"Subtarget->has3DNowA()">;
885 def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
// UseSSEx predicates: the SSE level is available but AVX is not, so the
// legacy (non-VEX) encodings should be selected.
886 def UseSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
887 def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
888 def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
889 def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
890 def UseSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
891 def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
892 def UseSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
893 def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
894 def NoSSE41 : Predicate<"!Subtarget->hasSSE41()">;
895 def UseSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
896 def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
897 def UseSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
898 def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
899 def NoAVX : Predicate<"!Subtarget->hasAVX()">;
900 def HasAVX : Predicate<"Subtarget->hasAVX()">;
901 def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
902 def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
903 def HasAVX512 : Predicate<"Subtarget->hasAVX512()">;
// UseAVX/UseAVX2: AVX(2) is available but AVX-512 is not, so prefer the
// VEX-encoded forms over EVEX.
904 def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
905 def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
906 def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
907 def HasCDI : Predicate<"Subtarget->hasCDI()">;
908 def HasVPOPCNTDQ : Predicate<"Subtarget->hasVPOPCNTDQ()">;
909 def HasPFI : Predicate<"Subtarget->hasPFI()">;
910 def HasERI : Predicate<"Subtarget->hasERI()">;
911 def HasDQI : Predicate<"Subtarget->hasDQI()">;
912 def NoDQI : Predicate<"!Subtarget->hasDQI()">;
913 def HasBWI : Predicate<"Subtarget->hasBWI()">;
914 def NoBWI : Predicate<"!Subtarget->hasBWI()">;
915 def HasVLX : Predicate<"Subtarget->hasVLX()">;
916 def NoVLX : Predicate<"!Subtarget->hasVLX()">;
917 def NoVLX_Or_NoBWI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasBWI()">;
918 def NoVLX_Or_NoDQI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasDQI()">;
919 def PKU : Predicate<"Subtarget->hasPKU()">;
920 def HasVNNI : Predicate<"Subtarget->hasVNNI()">;
921 def HasVP2INTERSECT : Predicate<"Subtarget->hasVP2INTERSECT()">;
922 def HasBF16 : Predicate<"Subtarget->hasBF16()">;
923 def HasFP16 : Predicate<"Subtarget->hasFP16()">;
924 def HasAVXVNNI : Predicate <"Subtarget->hasAVXVNNI()">;
925 def NoVLX_Or_NoVNNI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVNNI()">;
927 def HasBITALG : Predicate<"Subtarget->hasBITALG()">;
928 def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
929 def HasAES : Predicate<"Subtarget->hasAES()">;
930 def HasVAES : Predicate<"Subtarget->hasVAES()">;
931 def NoVLX_Or_NoVAES : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVAES()">;
932 def HasFXSR : Predicate<"Subtarget->hasFXSR()">;
933 def HasXSAVE : Predicate<"Subtarget->hasXSAVE()">;
934 def HasXSAVEOPT : Predicate<"Subtarget->hasXSAVEOPT()">;
935 def HasXSAVEC : Predicate<"Subtarget->hasXSAVEC()">;
936 def HasXSAVES : Predicate<"Subtarget->hasXSAVES()">;
937 def HasPCLMUL : Predicate<"Subtarget->hasPCLMUL()">;
938 def NoVLX_Or_NoVPCLMULQDQ :
939 Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVPCLMULQDQ()">;
940 def HasVPCLMULQDQ : Predicate<"Subtarget->hasVPCLMULQDQ()">;
941 def HasGFNI : Predicate<"Subtarget->hasGFNI()">;
942 def HasFMA : Predicate<"Subtarget->hasFMA()">;
943 def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
944 def NoFMA4 : Predicate<"!Subtarget->hasFMA4()">;
945 def HasXOP : Predicate<"Subtarget->hasXOP()">;
946 def HasTBM : Predicate<"Subtarget->hasTBM()">;
947 def NoTBM : Predicate<"!Subtarget->hasTBM()">;
948 def HasLWP : Predicate<"Subtarget->hasLWP()">;
949 def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">;
950 def HasRDRAND : Predicate<"Subtarget->hasRDRAND()">;
951 def HasF16C : Predicate<"Subtarget->hasF16C()">;
952 def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">;
953 def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">;
954 def HasBMI : Predicate<"Subtarget->hasBMI()">;
955 def HasBMI2 : Predicate<"Subtarget->hasBMI2()">;
956 def NoBMI2 : Predicate<"!Subtarget->hasBMI2()">;
957 def HasVBMI : Predicate<"Subtarget->hasVBMI()">;
958 def HasVBMI2 : Predicate<"Subtarget->hasVBMI2()">;
959 def HasIFMA : Predicate<"Subtarget->hasIFMA()">;
960 def HasRTM : Predicate<"Subtarget->hasRTM()">;
961 def HasADX : Predicate<"Subtarget->hasADX()">;
962 def HasSHA : Predicate<"Subtarget->hasSHA()">;
963 def HasSGX : Predicate<"Subtarget->hasSGX()">;
964 def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">;
965 def HasSSEPrefetch : Predicate<"Subtarget->hasSSEPrefetch()">;
966 def NoSSEPrefetch : Predicate<"!Subtarget->hasSSEPrefetch()">;
967 def HasPrefetchW : Predicate<"Subtarget->hasPrefetchW()">;
968 def HasPREFETCHWT1 : Predicate<"Subtarget->hasPREFETCHWT1()">;
969 def HasLAHFSAHF : Predicate<"Subtarget->hasLAHFSAHF()">;
970 def HasMWAITX : Predicate<"Subtarget->hasMWAITX()">;
971 def HasCLZERO : Predicate<"Subtarget->hasCLZERO()">;
972 def HasCLDEMOTE : Predicate<"Subtarget->hasCLDEMOTE()">;
973 def HasMOVDIRI : Predicate<"Subtarget->hasMOVDIRI()">;
974 def HasMOVDIR64B : Predicate<"Subtarget->hasMOVDIR64B()">;
975 def HasPTWRITE : Predicate<"Subtarget->hasPTWRITE()">;
976 def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
977 def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
978 def HasSHSTK : Predicate<"Subtarget->hasSHSTK()">;
979 def HasCLFLUSHOPT : Predicate<"Subtarget->hasCLFLUSHOPT()">;
980 def HasCLWB : Predicate<"Subtarget->hasCLWB()">;
981 def HasWBNOINVD : Predicate<"Subtarget->hasWBNOINVD()">;
982 def HasRDPID : Predicate<"Subtarget->hasRDPID()">;
983 def HasWAITPKG : Predicate<"Subtarget->hasWAITPKG()">;
984 def HasINVPCID : Predicate<"Subtarget->hasINVPCID()">;
985 def HasCmpxchg8b : Predicate<"Subtarget->hasCmpxchg8b()">;
986 def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
987 def HasPCONFIG : Predicate<"Subtarget->hasPCONFIG()">;
988 def HasENQCMD : Predicate<"Subtarget->hasENQCMD()">;
989 def HasKL : Predicate<"Subtarget->hasKL()">;
990 def HasWIDEKL : Predicate<"Subtarget->hasWIDEKL()">;
991 def HasHRESET : Predicate<"Subtarget->hasHRESET()">;
992 def HasSERIALIZE : Predicate<"Subtarget->hasSERIALIZE()">;
993 def HasTSXLDTRK : Predicate<"Subtarget->hasTSXLDTRK()">;
994 def HasAMXTILE : Predicate<"Subtarget->hasAMXTILE()">;
995 def HasAMXBF16 : Predicate<"Subtarget->hasAMXBF16()">;
996 def HasAMXINT8 : Predicate<"Subtarget->hasAMXINT8()">;
997 def HasUINTR : Predicate<"Subtarget->hasUINTR()">;
// Operating-mode predicates. These also carry AssemblerPredicates so the
// assembler/disassembler enforce the same mode restrictions as codegen.
998 def Not64BitMode : Predicate<"!Subtarget->is64Bit()">,
999 AssemblerPredicate<(all_of (not Mode64Bit)), "Not 64-bit mode">;
1000 def In64BitMode : Predicate<"Subtarget->is64Bit()">,
1001 AssemblerPredicate<(all_of Mode64Bit), "64-bit mode">;
1002 def IsLP64 : Predicate<"Subtarget->isTarget64BitLP64()">;
1003 def NotLP64 : Predicate<"!Subtarget->isTarget64BitLP64()">;
1004 def In16BitMode : Predicate<"Subtarget->is16Bit()">,
1005 AssemblerPredicate<(all_of Mode16Bit), "16-bit mode">;
1006 def Not16BitMode : Predicate<"!Subtarget->is16Bit()">,
1007 AssemblerPredicate<(all_of (not Mode16Bit)), "Not 16-bit mode">;
1008 def In32BitMode : Predicate<"Subtarget->is32Bit()">,
1009 AssemblerPredicate<(all_of Mode32Bit), "32-bit mode">;
1010 def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
1011 def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
1012 def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||"
1013 "Subtarget->getFrameLowering()->hasFP(*MF)"> {
1014 let RecomputePerFunction = 1;
1016 def IsPS4 : Predicate<"Subtarget->isTargetPS4()">;
1017 def NotPS4 : Predicate<"!Subtarget->isTargetPS4()">;
1018 def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
1019 def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">;
1020 def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
1021 def KernelCode : Predicate<"TM.getCodeModel() == CodeModel::Kernel">;
1022 def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||"
1023 "TM.getCodeModel() == CodeModel::Kernel">;
1024 def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
1026 // We could compute these on a per-module basis but doing so requires accessing
1027 // the Function object through the <Target>Subtarget and objections were raised
1028 // to that (see post-commit review comments for r301750).
1029 let RecomputePerFunction = 1 in {
1030 def OptForSize : Predicate<"shouldOptForSize(MF)">;
1031 def OptForMinSize : Predicate<"MF->getFunction().hasMinSize()">;
1032 def OptForSpeed : Predicate<"!shouldOptForSize(MF)">;
1033 def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
1034 "shouldOptForSize(MF)">;
1035 def NoSSE41_Or_OptForSize : Predicate<"shouldOptForSize(MF) || "
1036 "!Subtarget->hasSSE41()">;
1039 def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
1040 def FavorMemIndirectCall : Predicate<"!Subtarget->slowTwoMemOps()">;
1041 def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
1042 def HasFastLZCNT : Predicate<"Subtarget->hasFastLZCNT()">;
1043 def HasFastSHLDRotate : Predicate<"Subtarget->hasFastSHLDRotate()">;
1044 def HasERMSB : Predicate<"Subtarget->hasERMSB()">;
1045 def HasFSRM : Predicate<"Subtarget->hasFSRM()">;
1046 def HasMFence : Predicate<"Subtarget->hasMFence()">;
1047 def UseIndirectThunkCalls : Predicate<"Subtarget->useIndirectThunkCalls()">;
1048 def NotUseIndirectThunkCalls : Predicate<"!Subtarget->useIndirectThunkCalls()">;
1050 //===----------------------------------------------------------------------===//
1051 // X86 Instruction Format Definitions.
1054 include "X86InstrFormats.td"
1056 //===----------------------------------------------------------------------===//
1057 // Pattern fragments.
1060 // X86 specific condition code. These correspond to CondCode in
1061 // X86InstrInfo.h. They must be kept in sync.
1062 def X86_COND_O : PatLeaf<(i8 0)>;
1063 def X86_COND_NO : PatLeaf<(i8 1)>;
1064 def X86_COND_B : PatLeaf<(i8 2)>; // alt. COND_C
1065 def X86_COND_AE : PatLeaf<(i8 3)>; // alt. COND_NC
1066 def X86_COND_E : PatLeaf<(i8 4)>; // alt. COND_Z
1067 def X86_COND_NE : PatLeaf<(i8 5)>; // alt. COND_NZ
1068 def X86_COND_BE : PatLeaf<(i8 6)>; // alt. COND_NA
1069 def X86_COND_A : PatLeaf<(i8 7)>; // alt. COND_NBE
1070 def X86_COND_S : PatLeaf<(i8 8)>;
1071 def X86_COND_NS : PatLeaf<(i8 9)>;
1072 def X86_COND_P : PatLeaf<(i8 10)>; // alt. COND_PE
1073 def X86_COND_NP : PatLeaf<(i8 11)>; // alt. COND_PO
1074 def X86_COND_L : PatLeaf<(i8 12)>; // alt. COND_NGE
1075 def X86_COND_GE : PatLeaf<(i8 13)>; // alt. COND_NL
1076 def X86_COND_LE : PatLeaf<(i8 14)>; // alt. COND_NG
1077 def X86_COND_G : PatLeaf<(i8 15)>; // alt. COND_NLE
// Immediates that fit in a sign-extended 8-bit (or 32-bit) field, enabling
// the shorter imm8/imm32 instruction encodings.
1079 def i16immSExt8 : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
1080 def i32immSExt8 : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
1081 def i64immSExt8 : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
1082 def i64immSExt32 : ImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
1083 def i64timmSExt32 : TImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
// Relocatable-immediate variants; only match absolute symbol references
// whose value is known to fit the sign-extended width.
1085 def i16relocImmSExt8 : PatLeaf<(i16 relocImm), [{
1086 return isSExtAbsoluteSymbolRef(8, N);
1088 def i32relocImmSExt8 : PatLeaf<(i32 relocImm), [{
1089 return isSExtAbsoluteSymbolRef(8, N);
1091 def i64relocImmSExt8 : PatLeaf<(i64 relocImm), [{
1092 return isSExtAbsoluteSymbolRef(8, N);
1094 def i64relocImmSExt32 : PatLeaf<(i64 relocImm), [{
1095 return isSExtAbsoluteSymbolRef(32, N);
1098 // If we have multiple users of an immediate, it's much smaller to reuse
1099 // the register, rather than encode the immediate in every instruction.
1100 // This has the risk of increasing register pressure from stretched live
1101 // ranges, however, the immediates should be trivial to rematerialize by
1102 // the RA in the event of high register pressure.
1103 // TODO : This is currently enabled for stores and binary ops. There are more
1104 // cases for which this can be enabled, though this catches the bulk of the
1106 // TODO2 : This should really also be enabled under O2, but there's currently
1107 // an issue with RA where we don't pull the constants into their users
1108 // when we rematerialize them. I'll follow-up on enabling O2 after we fix that
1110 // TODO3 : This is currently limited to single basic blocks (DAG creation
1111 // pulls block immediates to the top and merges them if necessary).
1112 // Eventually, it would be nice to allow ConstantHoisting to merge constants
1113 // globally for potentially added savings.
// The *_su ("size-use") variants match only when an immediate-operand
// instruction form is acceptable per shouldAvoidImmediateInstFormsForSize.
1115 def imm_su : PatLeaf<(imm), [{
1116 return !shouldAvoidImmediateInstFormsForSize(N);
1118 def i64immSExt32_su : PatLeaf<(i64immSExt32), [{
1119 return !shouldAvoidImmediateInstFormsForSize(N);
1122 def relocImm8_su : PatLeaf<(i8 relocImm), [{
1123 return !shouldAvoidImmediateInstFormsForSize(N);
1125 def relocImm16_su : PatLeaf<(i16 relocImm), [{
1126 return !shouldAvoidImmediateInstFormsForSize(N);
1128 def relocImm32_su : PatLeaf<(i32 relocImm), [{
1129 return !shouldAvoidImmediateInstFormsForSize(N);
1132 def i16relocImmSExt8_su : PatLeaf<(i16relocImmSExt8), [{
1133 return !shouldAvoidImmediateInstFormsForSize(N);
1135 def i32relocImmSExt8_su : PatLeaf<(i32relocImmSExt8), [{
1136 return !shouldAvoidImmediateInstFormsForSize(N);
1138 def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{
1139 return !shouldAvoidImmediateInstFormsForSize(N);
1141 def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{
1142 return !shouldAvoidImmediateInstFormsForSize(N);
1145 def i16immSExt8_su : PatLeaf<(i16immSExt8), [{
1146 return !shouldAvoidImmediateInstFormsForSize(N);
1148 def i32immSExt8_su : PatLeaf<(i32immSExt8), [{
1149 return !shouldAvoidImmediateInstFormsForSize(N);
1151 def i64immSExt8_su : PatLeaf<(i64immSExt8), [{
1152 return !shouldAvoidImmediateInstFormsForSize(N);
1155 // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
1157 def i64immZExt32 : ImmLeaf<i64, [{ return isUInt<32>(Imm); }]>;
1159 def i64immZExt32SExt8 : ImmLeaf<i64, [{
1160 return isUInt<32>(Imm) && isInt<8>(static_cast<int32_t>(Imm));
1163 // Helper fragments for loads.
1165 // It's safe to fold a zextload/extload from i1 as a regular i8 load. The
1166 // upper bits are guaranteed to be zero and we were going to emit a MOV8rm
1167 // which might get folded during peephole anyway.
1168 def loadi8 : PatFrag<(ops node:$ptr), (i8 (unindexedload node:$ptr)), [{
1169 LoadSDNode *LD = cast<LoadSDNode>(N);
1170 ISD::LoadExtType ExtType = LD->getExtensionType();
1171 return ExtType == ISD::NON_EXTLOAD || ExtType == ISD::EXTLOAD ||
1172 ExtType == ISD::ZEXTLOAD;
1175 // It's always safe to treat an anyext i16 load as an i32 load if the i16 is
1176 // known to be 32-bit aligned or better. Ditto for i8 to i16.
1177 def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
1178 LoadSDNode *LD = cast<LoadSDNode>(N);
1179 ISD::LoadExtType ExtType = LD->getExtensionType();
1180 if (ExtType == ISD::NON_EXTLOAD)
1182 if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
1183 return LD->getAlignment() >= 2 && LD->isSimple();
1187 def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
1188 LoadSDNode *LD = cast<LoadSDNode>(N);
1189 ISD::LoadExtType ExtType = LD->getExtensionType();
1190 if (ExtType == ISD::NON_EXTLOAD)
1192 if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
1193 return LD->getAlignment() >= 4 && LD->isSimple();
// Plain (non-extending) loads of the remaining scalar types.
1197 def loadi64 : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
1198 def loadf16 : PatFrag<(ops node:$ptr), (f16 (load node:$ptr))>;
1199 def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
1200 def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
1201 def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
1202 def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>;
// f128 load that is at least naturally aligned.
1203 def alignedloadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
1204 LoadSDNode *Ld = cast<LoadSDNode>(N);
1205 return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
// f128 load usable as a memory operand: either the subtarget tolerates
// unaligned SSE memory accesses, or the load is naturally aligned.
1207 def memopf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
1208 LoadSDNode *Ld = cast<LoadSDNode>(N);
1209 return Subtarget->hasSSEUnalignedMem() ||
1210 Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
// Sign-/zero-/any-extending load fragments, named <kind>load<dstty><srcty>.
1213 def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
1214 def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
1215 def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
1216 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
1217 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
1218 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
1220 def zextloadi8i1 : PatFrag<(ops node:$ptr), (i8 (zextloadi1 node:$ptr))>;
1221 def zextloadi16i1 : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
1222 def zextloadi32i1 : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
1223 def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
1224 def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
1225 def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
1226 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
1227 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
1228 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
1229 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
1231 def extloadi8i1 : PatFrag<(ops node:$ptr), (i8 (extloadi1 node:$ptr))>;
1232 def extloadi16i1 : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
1233 def extloadi32i1 : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
1234 def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
1235 def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
1236 def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
1237 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
1238 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
1239 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
1241 // We can treat an i8/i16 extending load to i64 as a 32 bit load if it's known
1242 // to be 4 byte aligned or better.
1243 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (unindexedload node:$ptr)), [{
1244 LoadSDNode *LD = cast<LoadSDNode>(N);
1245 ISD::LoadExtType ExtType = LD->getExtensionType();
1246 if (ExtType != ISD::EXTLOAD)
1248 if (LD->getMemoryVT() == MVT::i32)
1251 return LD->getAlignment() >= 4 && LD->isSimple();
1255 // An 'and' node with a single use.
1256 def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
1257 return N->hasOneUse();
1259 // An 'srl' node with a single use.
1260 def srl_su : PatFrag<(ops node:$lhs, node:$rhs), (srl node:$lhs, node:$rhs), [{
1261 return N->hasOneUse();
1263 // A 'trunc' node with a single use.
1264 def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
1265 return N->hasOneUse();
1268 //===----------------------------------------------------------------------===//
1269 // Instruction list.
// NOP family: the single-byte 0x90 form plus the multi-byte 0F 1F /0 forms
// with memory and register operands.
1273 let hasSideEffects = 0, SchedRW = [WriteNop] in {
1274 def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
1275 def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero),
1276 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
1277 def NOOPL : I<0x1f, MRMXm, (outs), (ins i32mem:$zero),
1278 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
1279 def NOOPQ : RI<0x1f, MRMXm, (outs), (ins i64mem:$zero),
1280 "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
1281 Requires<[In64BitMode]>;
1282 // Also allow register so we can assemble/disassemble
1283 def NOOPWr : I<0x1f, MRMXr, (outs), (ins GR16:$zero),
1284 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
1285 def NOOPLr : I<0x1f, MRMXr, (outs), (ins GR32:$zero),
1286 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
1287 def NOOPQr : RI<0x1f, MRMXr, (outs), (ins GR64:$zero),
1288 "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
1289 Requires<[In64BitMode]>;
1293 // Constructing a stack frame.
1294 def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
1295 "enter\t$len, $lvl", []>, Sched<[WriteMicrocoded]>;
1297 let SchedRW = [WriteALU] in {
1298 let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, hasSideEffects=0 in
1299 def LEAVE : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
1300 Requires<[Not64BitMode]>;
1302 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, hasSideEffects = 0 in
1303 def LEAVE64 : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
1304 Requires<[In64BitMode]>;
1307 //===----------------------------------------------------------------------===//
1308 // Miscellaneous Instructions.
1311 let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1,
1312 SchedRW = [WriteSystem] in
1313 def Int_eh_sjlj_setup_dispatch
1314 : PseudoI<(outs), (ins), [(X86eh_sjlj_setup_dispatch)]>;
// 16/32-bit stack push/pop. The *rmr/*rmm "long forms" exist only so the
// disassembler can round-trip the alternative 0x8F/0xFF encodings.
1316 let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
1317 let mayLoad = 1, SchedRW = [WriteLoad] in {
1318 def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
1320 def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
1321 OpSize32, Requires<[Not64BitMode]>;
1322 // Long form for the disassembler.
1323 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1324 def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
1325 OpSize16, NotMemoryFoldable;
1326 def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
1327 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
1328 } // isCodeGenOnly = 1, ForceDisassemble = 1
1329 } // mayLoad, SchedRW
1330 let mayStore = 1, mayLoad = 1, SchedRW = [WriteCopy] in {
1331 def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", []>,
1333 def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", []>,
1334 OpSize32, Requires<[Not64BitMode]>;
1335 } // mayStore, mayLoad, SchedRW
1337 let mayStore = 1, SchedRW = [WriteStore] in {
1338 def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
1340 def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
1341 OpSize32, Requires<[Not64BitMode]>;
1342 // Long form for the disassembler.
1343 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1344 def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
1345 OpSize16, NotMemoryFoldable;
1346 def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
1347 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
1348 } // isCodeGenOnly = 1, ForceDisassemble = 1
1350 def PUSH16i8 : Ii8<0x6a, RawFrm, (outs), (ins i16i8imm:$imm),
1351 "push{w}\t$imm", []>, OpSize16;
1352 def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
1353 "push{w}\t$imm", []>, OpSize16;
1355 def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
1356 "push{l}\t$imm", []>, OpSize32,
1357 Requires<[Not64BitMode]>;
1358 def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
1359 "push{l}\t$imm", []>, OpSize32,
1360 Requires<[Not64BitMode]>;
1361 } // mayStore, SchedRW
1363 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
1364 def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src", []>,
1366 def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src", []>,
1367 OpSize32, Requires<[Not64BitMode]>;
1368 } // mayLoad, mayStore, SchedRW
// Pseudos that read/write the EFLAGS value through a GPR; they are expanded
// by a custom inserter and go through the stack (hence mayLoad/mayStore and
// the stack-pointer Defs).
1372 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1373 SchedRW = [WriteRMW], Defs = [ESP] in {
1375 def RDFLAGS32 : PseudoI<(outs GR32:$dst), (ins),
1376 [(set GR32:$dst, (int_x86_flags_read_u32))]>,
1377 Requires<[Not64BitMode]>;
1380 def RDFLAGS64 : PseudoI<(outs GR64:$dst), (ins),
1381 [(set GR64:$dst, (int_x86_flags_read_u64))]>,
1382 Requires<[In64BitMode]>;
1385 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1386 SchedRW = [WriteRMW] in {
1387 let Defs = [ESP, EFLAGS, DF], Uses = [ESP] in
1388 def WRFLAGS32 : PseudoI<(outs), (ins GR32:$src),
1389 [(int_x86_flags_write_u32 GR32:$src)]>,
1390 Requires<[Not64BitMode]>;
1392 let Defs = [RSP, EFLAGS, DF], Uses = [RSP] in
1393 def WRFLAGS64 : PseudoI<(outs), (ins GR64:$src),
1394 [(int_x86_flags_write_u64 GR64:$src)]>,
1395 Requires<[In64BitMode]>;
1398 let Defs = [ESP, EFLAGS, DF], Uses = [ESP], mayLoad = 1, hasSideEffects=0,
1399 SchedRW = [WriteLoad] in {
1400 def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize16;
1401 def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>, OpSize32,
1402 Requires<[Not64BitMode]>;
1405 let Defs = [ESP], Uses = [ESP, EFLAGS, DF], mayStore = 1, hasSideEffects=0,
1406 SchedRW = [WriteStore] in {
1407 def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize16;
1408 def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>, OpSize32,
1409 Requires<[Not64BitMode]>;
// 64-bit push/pop; these use OpSize32 because in 64-bit mode push/pop
// default to 64-bit operation without a REX.W prefix.
1412 let Defs = [RSP], Uses = [RSP], hasSideEffects=0 in {
1413 let mayLoad = 1, SchedRW = [WriteLoad] in {
1414 def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
1415 OpSize32, Requires<[In64BitMode]>;
1416 // Long form for the disassembler.
1417 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1418 def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
1419 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
1420 } // isCodeGenOnly = 1, ForceDisassemble = 1
1421 } // mayLoad, SchedRW
1422 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in
1423 def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", []>,
1424 OpSize32, Requires<[In64BitMode]>;
1425 let mayStore = 1, SchedRW = [WriteStore] in {
1426 def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
1427 OpSize32, Requires<[In64BitMode]>;
1428 // Long form for the disassembler.
1429 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1430 def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
1431 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
1432 } // isCodeGenOnly = 1, ForceDisassemble = 1
1433 } // mayStore, SchedRW
1434 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
1435 def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>,
1436 OpSize32, Requires<[In64BitMode]>;
1437 } // mayLoad, mayStore, SchedRW
1440 let Defs = [RSP], Uses = [RSP], hasSideEffects = 0, mayStore = 1,
1441 SchedRW = [WriteStore] in {
1442 def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm),
1443 "push{q}\t$imm", []>, OpSize32,
1444 Requires<[In64BitMode]>;
1445 def PUSH64i32 : Ii32S<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
1446 "push{q}\t$imm", []>, OpSize32,
1447 Requires<[In64BitMode]>;
1450 let Defs = [RSP, EFLAGS, DF], Uses = [RSP], mayLoad = 1, hasSideEffects=0 in
1451 def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
1452 OpSize32, Requires<[In64BitMode]>, Sched<[WriteLoad]>;
1453 let Defs = [RSP], Uses = [RSP, EFLAGS, DF], mayStore = 1, hasSideEffects=0 in
1454 def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
1455 OpSize32, Requires<[In64BitMode]>, Sched<[WriteStore]>;
// PUSHA/POPA: save/restore all eight general-purpose registers at once;
// only encodable outside 64-bit mode (Requires<[Not64BitMode]>).
1457 let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
1458 mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteLoad] in {
1459 def POPA32 : I<0x61, RawFrm, (outs), (ins), "popal", []>,
1460 OpSize32, Requires<[Not64BitMode]>;
1461 def POPA16 : I<0x61, RawFrm, (outs), (ins), "popaw", []>,
1462 OpSize16, Requires<[Not64BitMode]>;
1464 let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
1465 mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
1466 def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pushal", []>,
1467 OpSize32, Requires<[Not64BitMode]>;
1468 def PUSHA16 : I<0x60, RawFrm, (outs), (ins), "pushaw", []>,
1469 OpSize16, Requires<[Not64BitMode]>;
1472 let Constraints = "$src = $dst", SchedRW = [WriteBSWAP32] in {
1473 // This instruction is a consequence of BSWAP32r observing operand size. The
1474 // encoding is valid, but the behavior is undefined.
1475 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
1476 def BSWAP16r_BAD : I<0xC8, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
1477 "bswap{w}\t$dst", []>, OpSize16, TB;
1478 // GR32 = bswap GR32
1479 def BSWAP32r : I<0xC8, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
1481 [(set GR32:$dst, (bswap GR32:$src))]>, OpSize32, TB;
1483 let SchedRW = [WriteBSWAP64] in
1484 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
1486 [(set GR64:$dst, (bswap GR64:$src))]>, TB;
1487 } // Constraints = "$src = $dst", SchedRW
1489 // Bit scan instructions.
// BSF (forward) and BSR (reverse) for 16/32/64-bit operands, each with a
// register-source and a memory-source form; all also produce EFLAGS.
1490 let Defs = [EFLAGS] in {
1491 def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1492 "bsf{w}\t{$src, $dst|$dst, $src}",
1493 [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>,
1494 PS, OpSize16, Sched<[WriteBSF]>;
1495 def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1496 "bsf{w}\t{$src, $dst|$dst, $src}",
1497 [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>,
1498 PS, OpSize16, Sched<[WriteBSFLd]>;
1499 def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1500 "bsf{l}\t{$src, $dst|$dst, $src}",
1501 [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>,
1502 PS, OpSize32, Sched<[WriteBSF]>;
1503 def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1504 "bsf{l}\t{$src, $dst|$dst, $src}",
1505 [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>,
1506 PS, OpSize32, Sched<[WriteBSFLd]>;
1507 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1508 "bsf{q}\t{$src, $dst|$dst, $src}",
1509 [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>,
1510 PS, Sched<[WriteBSF]>;
1511 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1512 "bsf{q}\t{$src, $dst|$dst, $src}",
1513 [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>,
1514 PS, Sched<[WriteBSFLd]>;
1516 def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1517 "bsr{w}\t{$src, $dst|$dst, $src}",
1518 [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>,
1519 PS, OpSize16, Sched<[WriteBSR]>;
1520 def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1521 "bsr{w}\t{$src, $dst|$dst, $src}",
1522 [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>,
1523 PS, OpSize16, Sched<[WriteBSRLd]>;
1524 def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1525 "bsr{l}\t{$src, $dst|$dst, $src}",
1526 [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>,
1527 PS, OpSize32, Sched<[WriteBSR]>;
1528 def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1529 "bsr{l}\t{$src, $dst|$dst, $src}",
1530 [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>,
1531 PS, OpSize32, Sched<[WriteBSRLd]>;
1532 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1533 "bsr{q}\t{$src, $dst|$dst, $src}",
1534 [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>,
1535 PS, Sched<[WriteBSR]>;
1536 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1537 "bsr{q}\t{$src, $dst|$dst, $src}",
1538 [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>,
1539 PS, Sched<[WriteBSRLd]>;
1540 } // Defs = [EFLAGS]
// String instructions (MOVS/STOS/SCAS/CMPS). These have no explicit pattern;
// the index registers and direction flag are modeled as implicit Defs/Uses,
// and the dstidx*/srcidx* operands exist only for assembly printing/parsing.
// All are microcoded.
1542 let SchedRW = [WriteMicrocoded] in {
1543 let Defs = [EDI,ESI], Uses = [EDI,ESI,DF] in {
1544 def MOVSB : I<0xA4, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1545 "movsb\t{$src, $dst|$dst, $src}", []>;
1546 def MOVSW : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1547 "movsw\t{$src, $dst|$dst, $src}", []>, OpSize16;
1548 def MOVSL : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1549 "movs{l|d}\t{$src, $dst|$dst, $src}", []>, OpSize32;
1550 def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1551 "movsq\t{$src, $dst|$dst, $src}", []>,
1552 Requires<[In64BitMode]>;
// STOS: store AL/AX/EAX/RAX at [(E/R)DI], advancing EDI/RDI per DF.
1555 let Defs = [EDI], Uses = [AL,EDI,DF] in
1556 def STOSB : I<0xAA, RawFrmDst, (outs), (ins dstidx8:$dst),
1557 "stosb\t{%al, $dst|$dst, al}", []>;
1558 let Defs = [EDI], Uses = [AX,EDI,DF] in
1559 def STOSW : I<0xAB, RawFrmDst, (outs), (ins dstidx16:$dst),
1560 "stosw\t{%ax, $dst|$dst, ax}", []>, OpSize16;
1561 let Defs = [EDI], Uses = [EAX,EDI,DF] in
1562 def STOSL : I<0xAB, RawFrmDst, (outs), (ins dstidx32:$dst),
1563 "stos{l|d}\t{%eax, $dst|$dst, eax}", []>, OpSize32;
1564 let Defs = [RDI], Uses = [RAX,RDI,DF] in
1565 def STOSQ : RI<0xAB, RawFrmDst, (outs), (ins dstidx64:$dst),
1566 "stosq\t{%rax, $dst|$dst, rax}", []>,
1567 Requires<[In64BitMode]>;
// SCAS: compare the accumulator against [(E/R)DI]; sets EFLAGS.
1569 let Defs = [EDI,EFLAGS], Uses = [AL,EDI,DF] in
1570 def SCASB : I<0xAE, RawFrmDst, (outs), (ins dstidx8:$dst),
1571 "scasb\t{$dst, %al|al, $dst}", []>;
1572 let Defs = [EDI,EFLAGS], Uses = [AX,EDI,DF] in
1573 def SCASW : I<0xAF, RawFrmDst, (outs), (ins dstidx16:$dst),
1574 "scasw\t{$dst, %ax|ax, $dst}", []>, OpSize16;
1575 let Defs = [EDI,EFLAGS], Uses = [EAX,EDI,DF] in
1576 def SCASL : I<0xAF, RawFrmDst, (outs), (ins dstidx32:$dst),
1577 "scas{l|d}\t{$dst, %eax|eax, $dst}", []>, OpSize32;
// NOTE(review): SCASQ models the index register as EDI while STOSQ uses
// RDI — confirm this asymmetry is intentional in the register modeling.
1578 let Defs = [EDI,EFLAGS], Uses = [RAX,EDI,DF] in
1579 def SCASQ : RI<0xAF, RawFrmDst, (outs), (ins dstidx64:$dst),
1580 "scasq\t{$dst, %rax|rax, $dst}", []>,
1581 Requires<[In64BitMode]>;
// CMPS: compare [(E/R)SI] against [(E/R)DI]; sets EFLAGS.
1583 let Defs = [EDI,ESI,EFLAGS], Uses = [EDI,ESI,DF] in {
1584 def CMPSB : I<0xA6, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1585 "cmpsb\t{$dst, $src|$src, $dst}", []>;
1586 def CMPSW : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1587 "cmpsw\t{$dst, $src|$src, $dst}", []>, OpSize16;
1588 def CMPSL : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1589 "cmps{l|d}\t{$dst, $src|$src, $dst}", []>, OpSize32;
1590 def CMPSQ : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1591 "cmpsq\t{$dst, $src|$src, $dst}", []>,
1592 Requires<[In64BitMode]>;
1596 //===----------------------------------------------------------------------===//
1597 // Move Instructions.
// Basic MOV forms: register-register (no pattern — copies are emitted by
// the copy lowering, hence hasSideEffects = 0, isMoveReg = 1), immediate,
// and immediate-to-memory.
1599 let SchedRW = [WriteMove] in {
1600 let hasSideEffects = 0, isMoveReg = 1 in {
1601 def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
1602 "mov{b}\t{$src, $dst|$dst, $src}", []>;
1603 def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
1604 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16;
1605 def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
1606 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32;
1607 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
1608 "mov{q}\t{$src, $dst|$dst, $src}", []>;
// Immediate moves are rematerializable and as cheap as a register move, so
// the register allocator may re-emit them instead of spilling.
1611 let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
1612 def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
1613 "mov{b}\t{$src, $dst|$dst, $src}",
1614 [(set GR8:$dst, imm:$src)]>;
1615 def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
1616 "mov{w}\t{$src, $dst|$dst, $src}",
1617 [(set GR16:$dst, imm:$src)]>, OpSize16;
1618 def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
1619 "mov{l}\t{$src, $dst|$dst, $src}",
1620 [(set GR32:$dst, imm:$src)]>, OpSize32;
// 64-bit move of a sign-extended 32-bit immediate (C7 /0), shorter than the
// full movabs encoding below.
1621 def MOV64ri32 : RIi32S<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
1622 "mov{q}\t{$src, $dst|$dst, $src}",
1623 [(set GR64:$dst, i64immSExt32:$src)]>;
// Full 64-bit immediate (movabs); not isAsCheapAsAMove — 10-byte encoding.
1625 let isReMaterializable = 1, isMoveImm = 1 in {
1626 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
1627 "movabs{q}\t{$src, $dst|$dst, $src}",
1628 [(set GR64:$dst, imm:$src)]>;
1631 // Longer forms that use a ModR/M byte. Needed for disassembler
1632 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
1633 def MOV8ri_alt : Ii8 <0xC6, MRM0r, (outs GR8 :$dst), (ins i8imm :$src),
1634 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1635 FoldGenData<"MOV8ri">;
1636 def MOV16ri_alt : Ii16<0xC7, MRM0r, (outs GR16:$dst), (ins i16imm:$src),
1637 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
1638 FoldGenData<"MOV16ri">;
1639 def MOV32ri_alt : Ii32<0xC7, MRM0r, (outs GR32:$dst), (ins i32imm:$src),
1640 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
1641 FoldGenData<"MOV32ri">;
// Immediate-to-memory stores; the _su immediate fragments gate selection
// (single-use immediates, per their PatFrag definitions elsewhere).
1645 let SchedRW = [WriteStore] in {
1646 def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
1647 "mov{b}\t{$src, $dst|$dst, $src}",
1648 [(store (i8 imm_su:$src), addr:$dst)]>;
1649 def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
1650 "mov{w}\t{$src, $dst|$dst, $src}",
1651 [(store (i16 imm_su:$src), addr:$dst)]>, OpSize16;
1652 def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
1653 "mov{l}\t{$src, $dst|$dst, $src}",
1654 [(store (i32 imm_su:$src), addr:$dst)]>, OpSize32;
1655 def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
1656 "mov{q}\t{$src, $dst|$dst, $src}",
1657 [(store i64immSExt32_su:$src, addr:$dst)]>,
1658 Requires<[In64BitMode]>;
// Selection patterns mapping relocatable immediates (symbol addresses etc.)
// onto the plain MOV immediate forms above.
1661 def : Pat<(i32 relocImm:$src), (MOV32ri relocImm:$src)>;
1662 def : Pat<(i64 relocImm:$src), (MOV64ri relocImm:$src)>;
1664 def : Pat<(store (i8 relocImm8_su:$src), addr:$dst),
1665 (MOV8mi addr:$dst, relocImm8_su:$src)>;
1666 def : Pat<(store (i16 relocImm16_su:$src), addr:$dst),
1667 (MOV16mi addr:$dst, relocImm16_su:$src)>;
1668 def : Pat<(store (i32 relocImm32_su:$src), addr:$dst),
1669 (MOV32mi addr:$dst, relocImm32_su:$src)>;
// NOTE(review): the source pattern binds i64relocImmSExt32_su:$src but the
// result names i64immSExt32_su:$src — confirm the mismatch is intentional
// (other patterns in this group repeat the same fragment on both sides).
1670 def : Pat<(store (i64 i64relocImmSExt32_su:$src), addr:$dst),
1671 (MOV64mi32 addr:$dst, i64immSExt32_su:$src)>;
// Accumulator <-> absolute-offset moves (opcodes A0-A3). The "ao"/"oa"
// naming encodes direction and the address-size of the embedded offset
// (16/32/64-bit). No patterns — assembler/disassembler only.
1673 let hasSideEffects = 0 in {
1675 /// Memory offset versions of moves. The immediate is an address mode sized
1676 /// offset from the segment base.
1677 let SchedRW = [WriteALU] in {
1678 let mayLoad = 1 in {
1680 def MOV8ao32 : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset32_8:$src),
1681 "mov{b}\t{$src, %al|al, $src}", []>,
1684 def MOV16ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_16:$src),
1685 "mov{w}\t{$src, %ax|ax, $src}", []>,
1688 def MOV32ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_32:$src),
1689 "mov{l}\t{$src, %eax|eax, $src}", []>,
1692 def MOV64ao32 : RIi32<0xA1, RawFrmMemOffs, (outs), (ins offset32_64:$src),
1693 "mov{q}\t{$src, %rax|rax, $src}", []>,
1697 def MOV8ao16 : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset16_8:$src),
1698 "mov{b}\t{$src, %al|al, $src}", []>, AdSize16;
1700 def MOV16ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_16:$src),
1701 "mov{w}\t{$src, %ax|ax, $src}", []>,
1704 def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
1705 "mov{l}\t{$src, %eax|eax, $src}", []>,
// Store direction: accumulator -> [offset].
1708 let mayStore = 1 in {
1710 def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs), (ins offset32_8:$dst),
1711 "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize32;
1713 def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_16:$dst),
1714 "mov{w}\t{%ax, $dst|$dst, ax}", []>,
1717 def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_32:$dst),
1718 "mov{l}\t{%eax, $dst|$dst, eax}", []>,
1721 def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs), (ins offset32_64:$dst),
1722 "mov{q}\t{%rax, $dst|$dst, rax}", []>,
1726 def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs), (ins offset16_8:$dst),
1727 "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize16;
1729 def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_16:$dst),
1730 "mov{w}\t{%ax, $dst|$dst, ax}", []>,
1733 def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_32:$dst),
1734 "mov{l}\t{%eax, $dst|$dst, eax}", []>,
1738 // These forms all have full 64-bit absolute addresses in their instructions
1739 // and use the movabs mnemonic to indicate this specific form.
1740 let mayLoad = 1 in {
1742 def MOV8ao64 : Ii64<0xA0, RawFrmMemOffs, (outs), (ins offset64_8:$src),
1743 "movabs{b}\t{$src, %al|al, $src}", []>,
1746 def MOV16ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_16:$src),
1747 "movabs{w}\t{$src, %ax|ax, $src}", []>,
1750 def MOV32ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_32:$src),
1751 "movabs{l}\t{$src, %eax|eax, $src}", []>,
1754 def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
1755 "movabs{q}\t{$src, %rax|rax, $src}", []>,
1759 let mayStore = 1 in {
1761 def MOV8o64a : Ii64<0xA2, RawFrmMemOffs, (outs), (ins offset64_8:$dst),
1762 "movabs{b}\t{%al, $dst|$dst, al}", []>,
1765 def MOV16o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_16:$dst),
1766 "movabs{w}\t{%ax, $dst|$dst, ax}", []>,
1769 def MOV32o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_32:$dst),
1770 "movabs{l}\t{%eax, $dst|$dst, eax}", []>,
1773 def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs), (ins offset64_64:$dst),
1774 "movabs{q}\t{%rax, $dst|$dst, rax}", []>,
1778 } // hasSideEffects = 0
// Reversed-encoding register-register MOVs (8A/8B instead of 88/89).
// CodeGen never emits these (isCodeGenOnly + ForceDisassemble keeps them
// for the disassembler); FoldGenData ties them to the canonical forms.
1780 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
1781 SchedRW = [WriteMove], isMoveReg = 1 in {
1782 def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
1783 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1784 FoldGenData<"MOV8rr">;
1785 def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1786 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
1787 FoldGenData<"MOV16rr">;
1788 def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1789 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
1790 FoldGenData<"MOV32rr">;
1791 def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1792 "mov{q}\t{$src, $dst|$dst, $src}", []>,
1793 FoldGenData<"MOV64rr">;
1796 // Reversed version with ".s" suffix for GAS compatibility.
1797 def : InstAlias<"mov{b}.s\t{$src, $dst|$dst, $src}",
1798 (MOV8rr_REV GR8:$dst, GR8:$src), 0>;
1799 def : InstAlias<"mov{w}.s\t{$src, $dst|$dst, $src}",
1800 (MOV16rr_REV GR16:$dst, GR16:$src), 0>;
1801 def : InstAlias<"mov{l}.s\t{$src, $dst|$dst, $src}",
1802 (MOV32rr_REV GR32:$dst, GR32:$src), 0>;
1803 def : InstAlias<"mov{q}.s\t{$src, $dst|$dst, $src}",
1804 (MOV64rr_REV GR64:$dst, GR64:$src), 0>;
// Suffix-less AT&T variants; register classes disambiguate the width.
1805 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1806 (MOV8rr_REV GR8:$dst, GR8:$src), 0, "att">;
1807 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1808 (MOV16rr_REV GR16:$dst, GR16:$src), 0, "att">;
1809 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1810 (MOV32rr_REV GR32:$dst, GR32:$src), 0, "att">;
1811 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1812 (MOV64rr_REV GR64:$dst, GR64:$src), 0, "att">;
// MOV loads and stores with selection patterns. Loads are foldable and
// rematerializable so the allocator can re-load rather than spill.
1814 let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
1815 def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
1816 "mov{b}\t{$src, $dst|$dst, $src}",
1817 [(set GR8:$dst, (loadi8 addr:$src))]>;
1818 def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1819 "mov{w}\t{$src, $dst|$dst, $src}",
1820 [(set GR16:$dst, (loadi16 addr:$src))]>, OpSize16;
1821 def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1822 "mov{l}\t{$src, $dst|$dst, $src}",
1823 [(set GR32:$dst, (loadi32 addr:$src))]>, OpSize32;
1824 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1825 "mov{q}\t{$src, $dst|$dst, $src}",
1826 [(set GR64:$dst, (load addr:$src))]>;
// Register-to-memory stores.
1829 let SchedRW = [WriteStore] in {
1830 def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
1831 "mov{b}\t{$src, $dst|$dst, $src}",
1832 [(store GR8:$src, addr:$dst)]>;
1833 def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1834 "mov{w}\t{$src, $dst|$dst, $src}",
1835 [(store GR16:$src, addr:$dst)]>, OpSize16;
1836 def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1837 "mov{l}\t{$src, $dst|$dst, $src}",
1838 [(store GR32:$src, addr:$dst)]>, OpSize32;
1839 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
1840 "mov{q}\t{$src, $dst|$dst, $src}",
1841 [(store GR64:$src, addr:$dst)]>;
1844 // Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
1845 // that they can be used for copying and storing h registers, which can't be
1846 // encoded when a REX prefix is present.
1847 let isCodeGenOnly = 1 in {
1848 let hasSideEffects = 0, isMoveReg = 1 in
1849 def MOV8rr_NOREX : I<0x88, MRMDestReg,
1850 (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
1851 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1853 let mayStore = 1, hasSideEffects = 0 in
1854 def MOV8mr_NOREX : I<0x88, MRMDestMem,
1855 (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
1856 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1857 Sched<[WriteStore]>;
// Load form mirrors MOV8rm's foldable/rematerializable properties.
1858 let mayLoad = 1, hasSideEffects = 0,
1859 canFoldAsLoad = 1, isReMaterializable = 1 in
1860 def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
1861 (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
1862 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1867 // Condition code ops, incl. set if equal/not equal/...
// SAHF/LAHF transfer the low flag byte between AH and EFLAGS; gated on the
// HasLAHFSAHF predicate (not universally available in 64-bit mode).
1868 let SchedRW = [WriteLAHFSAHF] in {
1869 let Defs = [EFLAGS], Uses = [AH], hasSideEffects = 0 in
1870 def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>, // flags = AH
1871 Requires<[HasLAHFSAHF]>;
1872 let Defs = [AH], Uses = [EFLAGS], hasSideEffects = 0 in
1873 def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>, // AH = flags
1874 Requires<[HasLAHFSAHF]>;
1877 //===----------------------------------------------------------------------===//
1878 // Bit tests instructions: BT, BTS, BTR, BTC.
// BT copies the selected bit into CF; only the register and immediate forms
// have patterns (see the comment below on why mem+reg is disassembly-only).
1880 let Defs = [EFLAGS] in {
1881 let SchedRW = [WriteBitTest] in {
1882 def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
1883 "bt{w}\t{$src2, $src1|$src1, $src2}",
1884 [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>,
1885 OpSize16, TB, NotMemoryFoldable;
1886 def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
1887 "bt{l}\t{$src2, $src1|$src1, $src2}",
1888 [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>,
1889 OpSize32, TB, NotMemoryFoldable;
1890 def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
1891 "bt{q}\t{$src2, $src1|$src1, $src2}",
1892 [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB,
1896 // Unlike with the register+register form, the memory+register form of the
1897 // bt instruction does not ignore the high bits of the index. From ISel's
1898 // perspective, this is pretty bizarre. Make these instructions disassembly
1899 // only for now. These instructions are also slow on modern CPUs so that's
1900 // another reason to avoid generating them.
1902 let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteBitTestRegLd] in {
1903 def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1904 "bt{w}\t{$src2, $src1|$src1, $src2}",
1905 []>, OpSize16, TB, NotMemoryFoldable;
1906 def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1907 "bt{l}\t{$src2, $src1|$src1, $src2}",
1908 []>, OpSize32, TB, NotMemoryFoldable;
1909 def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1910 "bt{q}\t{$src2, $src1|$src1, $src2}",
1911 []>, TB, NotMemoryFoldable;
// Immediate-index forms (BA /4) — these DO have selection patterns.
1914 let SchedRW = [WriteBitTest] in {
1915 def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16u8imm:$src2),
1916 "bt{w}\t{$src2, $src1|$src1, $src2}",
1917 [(set EFLAGS, (X86bt GR16:$src1, imm:$src2))]>,
1919 def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32u8imm:$src2),
1920 "bt{l}\t{$src2, $src1|$src1, $src2}",
1921 [(set EFLAGS, (X86bt GR32:$src1, imm:$src2))]>,
1923 def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64u8imm:$src2),
1924 "bt{q}\t{$src2, $src1|$src1, $src2}",
1925 [(set EFLAGS, (X86bt GR64:$src1, imm:$src2))]>, TB;
1928 // Note that these instructions aren't slow because that only applies when the
1929 // other operand is in a register. When it's an immediate, bt is still fast.
1930 let SchedRW = [WriteBitTestImmLd] in {
1931 def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
1932 "bt{w}\t{$src2, $src1|$src1, $src2}",
1933 [(set EFLAGS, (X86bt (loadi16 addr:$src1),
1936 def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
1937 "bt{l}\t{$src2, $src1|$src1, $src2}",
1938 [(set EFLAGS, (X86bt (loadi32 addr:$src1),
1941 def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
1942 "bt{q}\t{$src2, $src1|$src1, $src2}",
1943 [(set EFLAGS, (X86bt (loadi64 addr:$src1),
1945 Requires<[In64BitMode]>;
// BTC — bit test and complement. No patterns (pattern-less, disassembly
// and hand-written MI only); register forms tie $src1 to $dst, memory
// forms are read-modify-write.
1948 let hasSideEffects = 0 in {
1949 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1950 def BTC16rr : I<0xBB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1951 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
1952 OpSize16, TB, NotMemoryFoldable;
1953 def BTC32rr : I<0xBB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1954 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
1955 OpSize32, TB, NotMemoryFoldable;
1956 def BTC64rr : RI<0xBB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1957 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1961 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
1962 def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1963 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
1964 OpSize16, TB, NotMemoryFoldable;
1965 def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1966 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
1967 OpSize32, TB, NotMemoryFoldable;
1968 def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1969 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
// Immediate-index forms (BA /7).
1973 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1974 def BTC16ri8 : Ii8<0xBA, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
1975 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1976 def BTC32ri8 : Ii8<0xBA, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
1977 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1978 def BTC64ri8 : RIi8<0xBA, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
1979 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1982 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
1983 def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
1984 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1985 def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
1986 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1987 def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
1988 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1989 Requires<[In64BitMode]>;
// BTR — bit test and reset. Same structure as BTC above: rr (tied),
// mr (RMW), ri8 (BA /6, tied), mi8 (RMW).
1992 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1993 def BTR16rr : I<0xB3, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1994 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1995 OpSize16, TB, NotMemoryFoldable;
1996 def BTR32rr : I<0xB3, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1997 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1998 OpSize32, TB, NotMemoryFoldable;
1999 def BTR64rr : RI<0xB3, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
2000 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2004 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
2005 def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
2006 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
2007 OpSize16, TB, NotMemoryFoldable;
2008 def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
2009 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
2010 OpSize32, TB, NotMemoryFoldable;
2011 def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
2012 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2016 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
2017 def BTR16ri8 : Ii8<0xBA, MRM6r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
2018 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
2020 def BTR32ri8 : Ii8<0xBA, MRM6r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
2021 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
2023 def BTR64ri8 : RIi8<0xBA, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
2024 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
2027 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
2028 def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
2029 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
2031 def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
2032 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
2034 def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
2035 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2036 Requires<[In64BitMode]>;
// BTS — bit test and set. Same structure as BTC/BTR: rr (tied), mr (RMW),
// ri8 (BA /5, tied), mi8 (RMW). Closes the pattern-less hasSideEffects = 0
// region and the file-wide Defs = [EFLAGS] region for bit tests.
2039 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
2040 def BTS16rr : I<0xAB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
2041 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
2042 OpSize16, TB, NotMemoryFoldable;
2043 def BTS32rr : I<0xAB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
2044 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
2045 OpSize32, TB, NotMemoryFoldable;
2046 def BTS64rr : RI<0xAB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
2047 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2051 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
2052 def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
2053 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
2054 OpSize16, TB, NotMemoryFoldable;
2055 def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
2056 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
2057 OpSize32, TB, NotMemoryFoldable;
2058 def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
2059 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2063 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
2064 def BTS16ri8 : Ii8<0xBA, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
2065 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
2066 def BTS32ri8 : Ii8<0xBA, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
2067 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
2068 def BTS64ri8 : RIi8<0xBA, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
2069 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
2072 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
2073 def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
2074 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
2075 def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
2076 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
2077 def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
2078 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2079 Requires<[In64BitMode]>;
2081 } // hasSideEffects = 0
2082 } // Defs = [EFLAGS]
2085 //===----------------------------------------------------------------------===//
2089 // Atomic swap. These are just normal xchg instructions. But since a memory
2090 // operand is referenced, the atomicity is ensured.
// The multiclass stamps out 8/16/32/64-bit XCHG-with-memory forms whose
// patterns select the named atomic PatFrag (frag # "_8" etc.); $val is tied
// to $dst so the old memory value is returned in the same register.
2091 multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag> {
2092 let Constraints = "$val = $dst", SchedRW = [WriteALULd, WriteRMW] in {
2093 def NAME#8rm : I<opc8, MRMSrcMem, (outs GR8:$dst),
2094 (ins GR8:$val, i8mem:$ptr),
2095 !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
2098 (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
2099 def NAME#16rm : I<opc, MRMSrcMem, (outs GR16:$dst),
2100 (ins GR16:$val, i16mem:$ptr),
2101 !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
2104 (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
2106 def NAME#32rm : I<opc, MRMSrcMem, (outs GR32:$dst),
2107 (ins GR32:$val, i32mem:$ptr),
2108 !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
2111 (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
2113 def NAME#64rm : RI<opc, MRMSrcMem, (outs GR64:$dst),
2114 (ins GR64:$val, i64mem:$ptr),
2115 !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
2118 (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
// Instantiate as XCHG (opcodes 86/87) selecting atomic_swap_* fragments.
2122 defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap">, NotMemoryFoldable;
2124 // Swap between registers.
// Register-register XCHG: both operands are read and written, modeled with
// two tied output/input pairs.
2125 let SchedRW = [WriteXCHG] in {
2126 let Constraints = "$src1 = $dst1, $src2 = $dst2", hasSideEffects = 0 in {
2127 def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst1, GR8:$dst2),
2128 (ins GR8:$src1, GR8:$src2),
2129 "xchg{b}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
2130 def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst1, GR16:$dst2),
2131 (ins GR16:$src1, GR16:$src2),
2132 "xchg{w}\t{$src2, $src1|$src1, $src2}", []>,
2133 OpSize16, NotMemoryFoldable;
2134 def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst1, GR32:$dst2),
2135 (ins GR32:$src1, GR32:$src2),
2136 "xchg{l}\t{$src2, $src1|$src1, $src2}", []>,
2137 OpSize32, NotMemoryFoldable;
2138 def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst1, GR64:$dst2),
2139 (ins GR64:$src1 ,GR64:$src2),
2140 "xchg{q}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
2143 // Swap between EAX and other registers.
// Short one-byte forms (90+rd); the accumulator side is implicit.
2144 let Constraints = "$src = $dst", hasSideEffects = 0 in {
2145 let Uses = [AX], Defs = [AX] in
2146 def XCHG16ar : I<0x90, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
2147 "xchg{w}\t{$src, %ax|ax, $src}", []>, OpSize16;
2148 let Uses = [EAX], Defs = [EAX] in
2149 def XCHG32ar : I<0x90, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
2150 "xchg{l}\t{$src, %eax|eax, $src}", []>, OpSize32;
2151 let Uses = [RAX], Defs = [RAX] in
2152 def XCHG64ar : RI<0x90, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
2153 "xchg{q}\t{$src, %rax|rax, $src}", []>;
// XADD — exchange and add (0F C0/C1). Register forms swap then store the
// sum; memory forms are RMW. All clobber EFLAGS; no selection patterns.
2157 let hasSideEffects = 0, Constraints = "$src1 = $dst1, $src2 = $dst2",
2158 Defs = [EFLAGS], SchedRW = [WriteXCHG] in {
2159 def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst1, GR8:$dst2),
2160 (ins GR8:$src1, GR8:$src2),
2161 "xadd{b}\t{$src2, $src1|$src1, $src2}", []>, TB;
2162 def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst1, GR16:$dst2),
2163 (ins GR16:$src1, GR16:$src2),
2164 "xadd{w}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize16;
2165 def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst1, GR32:$dst2),
2166 (ins GR32:$src1, GR32:$src2),
2167 "xadd{l}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize32;
2168 def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst1, GR64:$dst2),
2169 (ins GR64:$src1, GR64:$src2),
2170 "xadd{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
// Memory forms: $val receives the old memory value (tied to $dst).
2173 let mayLoad = 1, mayStore = 1, hasSideEffects = 0, Constraints = "$val = $dst",
2174 Defs = [EFLAGS], SchedRW = [WriteALULd, WriteRMW] in {
2175 def XADD8rm : I<0xC0, MRMSrcMem, (outs GR8:$dst),
2176 (ins GR8:$val, i8mem:$ptr),
2177 "xadd{b}\t{$val, $ptr|$ptr, $val}", []>, TB;
2178 def XADD16rm : I<0xC1, MRMSrcMem, (outs GR16:$dst),
2179 (ins GR16:$val, i16mem:$ptr),
2180 "xadd{w}\t{$val, $ptr|$ptr, $val}", []>, TB,
2182 def XADD32rm : I<0xC1, MRMSrcMem, (outs GR32:$dst),
2183 (ins GR32:$val, i32mem:$ptr),
2184 "xadd{l}\t{$val, $ptr|$ptr, $val}", []>, TB,
2186 def XADD64rm : RI<0xC1, MRMSrcMem, (outs GR64:$dst),
2187 (ins GR64:$val, i64mem:$ptr),
2188 "xadd{q}\t{$val, $ptr|$ptr, $val}", []>, TB;
// CMPXCHG — compare and exchange against the accumulator, which is both
// implicitly read and written (Defs/Uses below). No patterns; atomic
// expansion creates these directly.
2192 let SchedRW = [WriteCMPXCHG], hasSideEffects = 0 in {
2193 let Defs = [AL, EFLAGS], Uses = [AL] in
2194 def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
2195 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
2197 let Defs = [AX, EFLAGS], Uses = [AX] in
2198 def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
2199 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
2201 let Defs = [EAX, EFLAGS], Uses = [EAX] in
2202 def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
2203 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
2205 let Defs = [RAX, EFLAGS], Uses = [RAX] in
2206 def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
2207 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
2209 } // SchedRW, hasSideEffects
// Memory RMW forms.
2211 let SchedRW = [WriteCMPXCHGRMW], mayLoad = 1, mayStore = 1,
2212 hasSideEffects = 0 in {
2213 let Defs = [AL, EFLAGS], Uses = [AL] in
2214 def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
2215 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
2217 let Defs = [AX, EFLAGS], Uses = [AX] in
2218 def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2219 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
2221 let Defs = [EAX, EFLAGS], Uses = [EAX] in
2222 def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2223 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
2225 let Defs = [RAX, EFLAGS], Uses = [RAX] in
2226 def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2227 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
// Wide forms: EDX:EAX (resp. RDX:RAX) is compared, ECX:EBX (RCX:RBX) stored.
2230 let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
2231 def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
2232 "cmpxchg8b\t$dst", []>, TB, Requires<[HasCmpxchg8b]>;
2234 let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
2235 // NOTE: In64BitMode check needed for the AssemblerPredicate.
2236 def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
2237 "cmpxchg16b\t$dst", []>,
2238 TB, Requires<[HasCmpxchg16b,In64BitMode]>;
2239 } // SchedRW, mayLoad, mayStore, hasSideEffects
2242 // Lock instruction prefix
// Standalone prefix bytes modeled as instructions so the assembler can emit
// them independently of the instruction they modify.
2243 let SchedRW = [WriteMicrocoded] in
2244 def LOCK_PREFIX : I<0xF0, PrefixByte, (outs), (ins), "lock", []>;
2246 let SchedRW = [WriteNop] in {
2248 // Rex64 instruction prefix
2249 def REX64_PREFIX : I<0x48, PrefixByte, (outs), (ins), "rex64", []>,
2250 Requires<[In64BitMode]>;
2252 // Data16 instruction prefix
2253 def DATA16_PREFIX : I<0x66, PrefixByte, (outs), (ins), "data16", []>;
2256 // Repeat string operation instruction prefixes
// REP prefixes implicitly decrement ECX and honor DF.
2257 let Defs = [ECX], Uses = [ECX,DF], SchedRW = [WriteMicrocoded] in {
2258 // Repeat (used with INS, OUTS, MOVS, LODS and STOS)
2259 def REP_PREFIX : I<0xF3, PrefixByte, (outs), (ins), "rep", []>;
2260 // Repeat while not equal (used with CMPS and SCAS)
2261 def REPNE_PREFIX : I<0xF2, PrefixByte, (outs), (ins), "repne", []>;
2264 // String manipulation instructions
// LODS: load the accumulator from [(E/R)SI], advancing ESI per DF.
2265 let SchedRW = [WriteMicrocoded] in {
2266 let Defs = [AL,ESI], Uses = [ESI,DF] in
2267 def LODSB : I<0xAC, RawFrmSrc, (outs), (ins srcidx8:$src),
2268 "lodsb\t{$src, %al|al, $src}", []>;
2269 let Defs = [AX,ESI], Uses = [ESI,DF] in
2270 def LODSW : I<0xAD, RawFrmSrc, (outs), (ins srcidx16:$src),
2271 "lodsw\t{$src, %ax|ax, $src}", []>, OpSize16;
2272 let Defs = [EAX,ESI], Uses = [ESI,DF] in
2273 def LODSL : I<0xAD, RawFrmSrc, (outs), (ins srcidx32:$src),
2274 "lods{l|d}\t{$src, %eax|eax, $src}", []>, OpSize32;
2275 let Defs = [RAX,ESI], Uses = [ESI,DF] in
2276 def LODSQ : RI<0xAD, RawFrmSrc, (outs), (ins srcidx64:$src),
2277 "lodsq\t{$src, %rax|rax, $src}", []>,
2278 Requires<[In64BitMode]>;
// Port string I/O. OUTS writes [(E)SI] to port DX; INS reads port DX into
// [(E)DI]. Both honor DF and advance the index register.
2281 let SchedRW = [WriteSystem] in {
2282 let Defs = [ESI], Uses = [DX,ESI,DF] in {
2283 def OUTSB : I<0x6E, RawFrmSrc, (outs), (ins srcidx8:$src),
2284 "outsb\t{$src, %dx|dx, $src}", []>;
2285 def OUTSW : I<0x6F, RawFrmSrc, (outs), (ins srcidx16:$src),
2286 "outsw\t{$src, %dx|dx, $src}", []>, OpSize16;
2287 def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src),
2288 "outs{l|d}\t{$src, %dx|dx, $src}", []>, OpSize32;
2291 let Defs = [EDI], Uses = [DX,EDI,DF] in {
2292 def INSB : I<0x6C, RawFrmDst, (outs), (ins dstidx8:$dst),
2293 "insb\t{%dx, $dst|$dst, dx}", []>;
2294 def INSW : I<0x6D, RawFrmDst, (outs), (ins dstidx16:$dst),
2295 "insw\t{%dx, $dst|$dst, dx}", []>, OpSize16;
2296 def INSL : I<0x6D, RawFrmDst, (outs), (ins dstidx32:$dst),
2297 "ins{l|d}\t{%dx, $dst|$dst, dx}", []>, OpSize32;
2301 // EFLAGS management instructions.
// Carry-flag ops: clear (CLC), set (STC), complement (CMC).
2302 let SchedRW = [WriteALU], Defs = [EFLAGS], Uses = [EFLAGS] in {
2303 def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>;
2304 def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>;
2305 def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>;
2308 // DF management instructions.
2309 let SchedRW = [WriteALU], Defs = [DF] in {
2310 def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", []>;
2311 def STD : I<0xFD, RawFrm, (outs), (ins), "std", []>;
2314 // Table lookup instructions
// XLAT: AL = [EBX + AL] (table lookup through the implicit base register).
2315 let Uses = [AL,EBX], Defs = [AL], hasSideEffects = 0, mayLoad = 1 in
2316 def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>, Sched<[WriteLoad]>;
// BCD/ASCII adjust instructions. All were removed in x86-64, hence the
// Not64BitMode predicate on each; all are modeled as microcoded with no
// selection patterns (assembler/disassembler support only).
2318 let SchedRW = [WriteMicrocoded] in {
2319 // ASCII Adjust After Addition
2320 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2321 def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", []>,
2322 Requires<[Not64BitMode]>;
2324 // ASCII Adjust AX Before Division
2325 let Uses = [AX], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2326 def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
2327 "aad\t$src", []>, Requires<[Not64BitMode]>;
2329 // ASCII Adjust AX After Multiply
2330 let Uses = [AL], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2331 def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
2332 "aam\t$src", []>, Requires<[Not64BitMode]>;
2334 // ASCII Adjust AL After Subtraction - sets AX and EFLAGS, reading AL
2334 // and the incoming flags (see the Defs/Uses lists below).
2335 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2336 def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", []>,
2337 Requires<[Not64BitMode]>;
2339 // Decimal Adjust AL after Addition
2340 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2341 def DAA : I<0x27, RawFrm, (outs), (ins), "daa", []>,
2342 Requires<[Not64BitMode]>;
2344 // Decimal Adjust AL after Subtraction
2345 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2346 def DAS : I<0x2F, RawFrm, (outs), (ins), "das", []>,
2347 Requires<[Not64BitMode]>;
// Legacy protected-mode instructions (BOUND, ARPL); both were repurposed
// or removed in 64-bit mode, hence Not64BitMode on every def.
2350 let SchedRW = [WriteSystem] in {
2351 // Check Array Index Against Bounds
2352 // Note: "bound" does not have reversed operands in at&t syntax.
2353 def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2354 "bound\t$dst, $src", []>, OpSize16,
2355 Requires<[Not64BitMode]>;
2356 def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2357 "bound\t$dst, $src", []>, OpSize32,
2358 Requires<[Not64BitMode]>;
2360 // Adjust RPL Field of Segment Selector
2361 def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
2362 "arpl\t{$src, $dst|$dst, $src}", []>,
2363 Requires<[Not64BitMode]>, NotMemoryFoldable;
2365 def ARPL16mr : I<0x63, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2366 "arpl\t{$src, $dst|$dst, $src}", []>,
2367 Requires<[Not64BitMode]>, NotMemoryFoldable;
2370 //===----------------------------------------------------------------------===//
2371 // MOVBE Instructions
// MOVBE is selected as a byte-swapping load (bswap of a load) or a
// byte-swapping store (store of a bswap), as the patterns below show.
2373 let Predicates = [HasMOVBE] in {
2374 let SchedRW = [WriteALULd] in {
2375 def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2376 "movbe{w}\t{$src, $dst|$dst, $src}",
2377 [(set GR16:$dst, (bswap (loadi16 addr:$src)))]>,
2379 def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2380 "movbe{l}\t{$src, $dst|$dst, $src}",
2381 [(set GR32:$dst, (bswap (loadi32 addr:$src)))]>,
2383 def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2384 "movbe{q}\t{$src, $dst|$dst, $src}",
2385 [(set GR64:$dst, (bswap (loadi64 addr:$src)))]>,
2388 let SchedRW = [WriteStore] in {
2389 def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2390 "movbe{w}\t{$src, $dst|$dst, $src}",
2391 [(store (bswap GR16:$src), addr:$dst)]>,
2393 def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2394 "movbe{l}\t{$src, $dst|$dst, $src}",
2395 [(store (bswap GR32:$src), addr:$dst)]>,
2397 def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2398 "movbe{q}\t{$src, $dst|$dst, $src}",
2399 [(store (bswap GR64:$src), addr:$dst)]>,
2404 //===----------------------------------------------------------------------===//
2405 // RDRAND Instruction
// X86rdrand is a two-result node (value, flags); the patterns bind the
// second result to EFLAGS. Per the ISA, CF reports whether the value is
// valid — not visible from this file, so relied on via the X86rdrand node.
2407 let Predicates = [HasRDRAND], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
2408 def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
2409 "rdrand{w}\t$dst", [(set GR16:$dst, EFLAGS, (X86rdrand))]>,
2411 def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
2412 "rdrand{l}\t$dst", [(set GR32:$dst, EFLAGS, (X86rdrand))]>,
2414 def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
2415 "rdrand{q}\t$dst", [(set GR64:$dst, EFLAGS, (X86rdrand))]>,
2419 //===----------------------------------------------------------------------===//
2420 // RDSEED Instruction
// Same two-result shape as RDRAND; encoded with /7 instead of /6.
2422 let Predicates = [HasRDSEED], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
2423 def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins), "rdseed{w}\t$dst",
2424 [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, PS;
2425 def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins), "rdseed{l}\t$dst",
2426 [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, PS;
2427 def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdseed{q}\t$dst",
2428 [(set GR64:$dst, EFLAGS, (X86rdseed))]>, PS;
2431 //===----------------------------------------------------------------------===//
2432 // LZCNT Instruction
// LZCNT selects the generic ctlz node directly (no zero-undef variant
// needed: lzcnt is defined for input 0). Each pattern also marks EFLAGS
// as implicitly defined.
2434 let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
2435 def LZCNT16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2436 "lzcnt{w}\t{$src, $dst|$dst, $src}",
2437 [(set GR16:$dst, (ctlz GR16:$src)), (implicit EFLAGS)]>,
2438 XS, OpSize16, Sched<[WriteLZCNT]>;
2439 def LZCNT16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2440 "lzcnt{w}\t{$src, $dst|$dst, $src}",
2441 [(set GR16:$dst, (ctlz (loadi16 addr:$src))),
2442 (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteLZCNTLd]>;
2444 def LZCNT32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2445 "lzcnt{l}\t{$src, $dst|$dst, $src}",
2446 [(set GR32:$dst, (ctlz GR32:$src)), (implicit EFLAGS)]>,
2447 XS, OpSize32, Sched<[WriteLZCNT]>;
2448 def LZCNT32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2449 "lzcnt{l}\t{$src, $dst|$dst, $src}",
2450 [(set GR32:$dst, (ctlz (loadi32 addr:$src))),
2451 (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteLZCNTLd]>;
2453 def LZCNT64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2454 "lzcnt{q}\t{$src, $dst|$dst, $src}",
2455 [(set GR64:$dst, (ctlz GR64:$src)), (implicit EFLAGS)]>,
2456 XS, Sched<[WriteLZCNT]>;
2457 def LZCNT64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2458 "lzcnt{q}\t{$src, $dst|$dst, $src}",
2459 [(set GR64:$dst, (ctlz (loadi64 addr:$src))),
2460 (implicit EFLAGS)]>, XS, Sched<[WriteLZCNTLd]>;
2463 //===----------------------------------------------------------------------===//
// TZCNT (part of BMI1) is the mirror of LZCNT: selects cttz, opcode 0xBC.
2466 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2467 def TZCNT16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2468 "tzcnt{w}\t{$src, $dst|$dst, $src}",
2469 [(set GR16:$dst, (cttz GR16:$src)), (implicit EFLAGS)]>,
2470 XS, OpSize16, Sched<[WriteTZCNT]>;
2471 def TZCNT16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2472 "tzcnt{w}\t{$src, $dst|$dst, $src}",
2473 [(set GR16:$dst, (cttz (loadi16 addr:$src))),
2474 (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteTZCNTLd]>;
2476 def TZCNT32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2477 "tzcnt{l}\t{$src, $dst|$dst, $src}",
2478 [(set GR32:$dst, (cttz GR32:$src)), (implicit EFLAGS)]>,
2479 XS, OpSize32, Sched<[WriteTZCNT]>;
2480 def TZCNT32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2481 "tzcnt{l}\t{$src, $dst|$dst, $src}",
2482 [(set GR32:$dst, (cttz (loadi32 addr:$src))),
2483 (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteTZCNTLd]>;
2485 def TZCNT64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2486 "tzcnt{q}\t{$src, $dst|$dst, $src}",
2487 [(set GR64:$dst, (cttz GR64:$src)), (implicit EFLAGS)]>,
2488 XS, Sched<[WriteTZCNT]>;
2489 def TZCNT64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2490 "tzcnt{q}\t{$src, $dst|$dst, $src}",
2491 [(set GR64:$dst, (cttz (loadi64 addr:$src))),
2492 (implicit EFLAGS)]>, XS, Sched<[WriteTZCNTLd]>;
// Shared definition for the BMI1 BLS* group (blsr/blsmsk/blsi): one
// register and one memory form, VEX-encoded, opcode 0xF3 with the Format
// operands selecting the /r group. No patterns here — selection is done
// via the explicit Pat<> records later in this file.
2495 multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
2496 RegisterClass RC, X86MemOperand x86memop,
2497 X86FoldableSchedWrite sched> {
2498 let hasSideEffects = 0 in {
2499 def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
2500 !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
2501 T8PS, VEX_4V, Sched<[sched]>;
2503 def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
2504 !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
2505 T8PS, VEX_4V, Sched<[sched.Folded]>;
// 64-bit variants differ only by the VEX.W bit.
2509 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2510 defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem, WriteBLS>;
2511 defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem, WriteBLS>, VEX_W;
2512 defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem, WriteBLS>;
2513 defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem, WriteBLS>, VEX_W;
2514 defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem, WriteBLS>;
2515 defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem, WriteBLS>, VEX_W;
2518 //===----------------------------------------------------------------------===//
2519 // Pattern fragments to auto generate BMI instructions.
2520 //===----------------------------------------------------------------------===//
// These fragments match the flag-producing forms of or/xor/and, but only
// when no user of the flags result inspects the carry flag (the BLS*
// instructions do not produce the same CF as the generic ALU ops).
2522 def or_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2523 (X86or_flag node:$lhs, node:$rhs), [{
2524 return hasNoCarryFlagUses(SDValue(N, 1));
2527 def xor_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2528 (X86xor_flag node:$lhs, node:$rhs), [{
2529 return hasNoCarryFlagUses(SDValue(N, 1));
2532 def and_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2533 (X86and_flag node:$lhs, node:$rhs), [{
2534 return hasNoCarryFlagUses(SDValue(N, 1));
// Selection patterns for the BLS* idioms:
//   BLSR   = x & (x - 1)   (clear lowest set bit)
//   BLSMSK = x ^ (x - 1)   (mask up to lowest set bit)
//   BLSI   = x & -x        (isolate lowest set bit)
2537 let Predicates = [HasBMI] in {
2538 // FIXME: patterns for the load versions are not implemented
2539 def : Pat<(and GR32:$src, (add GR32:$src, -1)),
2540 (BLSR32rr GR32:$src)>;
2541 def : Pat<(and GR64:$src, (add GR64:$src, -1)),
2542 (BLSR64rr GR64:$src)>;
2544 def : Pat<(xor GR32:$src, (add GR32:$src, -1)),
2545 (BLSMSK32rr GR32:$src)>;
2546 def : Pat<(xor GR64:$src, (add GR64:$src, -1)),
2547 (BLSMSK64rr GR64:$src)>;
2549 def : Pat<(and GR32:$src, (ineg GR32:$src)),
2550 (BLSI32rr GR32:$src)>;
2551 def : Pat<(and GR64:$src, (ineg GR64:$src)),
2552 (BLSI64rr GR64:$src)>;
2554 // Versions to match flag producing ops.
2555 def : Pat<(and_flag_nocf GR32:$src, (add GR32:$src, -1)),
2556 (BLSR32rr GR32:$src)>;
2557 def : Pat<(and_flag_nocf GR64:$src, (add GR64:$src, -1)),
2558 (BLSR64rr GR64:$src)>;
2560 def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, -1)),
2561 (BLSMSK32rr GR32:$src)>;
2562 def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, -1)),
2563 (BLSMSK64rr GR64:$src)>;
2565 def : Pat<(and_flag_nocf GR32:$src, (ineg GR32:$src)),
2566 (BLSI32rr GR32:$src)>;
2567 def : Pat<(and_flag_nocf GR64:$src, (ineg GR64:$src)),
2568 (BLSI64rr GR64:$src)>;
// BEXTR: register and memory forms, VEX-encoded with the control operand
// in VEX.vvvv (MRMSrcReg4VOp3/MRMSrcMem4VOp3). The memory form's Sched
// list inserts ReadDefault entries — presumably placeholders for the
// address operands ahead of the register read; confirm against
// X86Schedule.td conventions.
2571 multiclass bmi_bextr<bits<8> opc, string mnemonic, RegisterClass RC,
2572 X86MemOperand x86memop, SDNode OpNode,
2573 PatFrag ld_frag, X86FoldableSchedWrite Sched> {
2574 def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2575 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2576 [(set RC:$dst, (OpNode RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2577 T8PS, VEX, Sched<[Sched]>;
2578 def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2579 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2580 [(set RC:$dst, (OpNode (ld_frag addr:$src1), RC:$src2)),
2581 (implicit EFLAGS)]>, T8PS, VEX,
2582 Sched<[Sched.Folded,
2584 ReadDefault, ReadDefault, ReadDefault, ReadDefault,
2587 Sched.ReadAfterFold]>;
2590 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2591 defm BEXTR32 : bmi_bextr<0xF7, "bextr{l}", GR32, i32mem,
2592 X86bextr, loadi32, WriteBEXTR>;
2593 defm BEXTR64 : bmi_bextr<0xF7, "bextr{q}", GR64, i64mem,
2594 X86bextr, loadi64, WriteBEXTR>, VEX_W;
// BZHI (BMI2) mirrors BEXTR's encoding shape; selected via X86bzhi.
2597 multiclass bmi_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
2598 X86MemOperand x86memop, SDNode Int,
2599 PatFrag ld_frag, X86FoldableSchedWrite Sched> {
2600 def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2601 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2602 [(set RC:$dst, (Int RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2603 T8PS, VEX, Sched<[Sched]>;
2604 def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2605 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2606 [(set RC:$dst, (Int (ld_frag addr:$src1), RC:$src2)),
2607 (implicit EFLAGS)]>, T8PS, VEX,
2608 Sched<[Sched.Folded,
2610 ReadDefault, ReadDefault, ReadDefault, ReadDefault,
2613 Sched.ReadAfterFold]>;
2616 let Predicates = [HasBMI2], Defs = [EFLAGS] in {
2617 defm BZHI32 : bmi_bzhi<0xF5, "bzhi{l}", GR32, i32mem,
2618 X86bzhi, loadi32, WriteBZHI>;
2619 defm BZHI64 : bmi_bzhi<0xF5, "bzhi{q}", GR64, i64mem,
2620 X86bzhi, loadi64, WriteBZHI>, VEX_W;
// Immediate transform: number of trailing one bits, emitted as an i8.
2623 def CountTrailingOnes : SDNodeXForm<imm, [{
2624 // Count the trailing ones in the immediate.
2625 return getI8Imm(countTrailingOnes(N->getZExtValue()), SDLoc(N));
// Builds a BEXTR control operand from a contiguous low mask: the length
// (trailing-ones count) is placed in bits 15:8; the start field (bits 7:0)
// stays 0, so BEXTR extracts the masked low bits.
2628 def BEXTRMaskXForm : SDNodeXForm<imm, [{
2629 unsigned Length = countTrailingOnes(N->getZExtValue());
2630 return getI32Imm(Length << 8, SDLoc(N));
// Matches a 64-bit contiguous low-bit mask that does NOT fit in an
// unsigned 32-bit immediate (small masks are handled by plain AND).
2633 def AndMask64 : ImmLeaf<i64, [{
2634 return isMask_64(Imm) && !isUInt<32>(Imm);
2637 // Use BEXTR for 64-bit 'and' with large immediate 'mask'.
// Only when BMI2 and TBM are absent — those provide cheaper forms below.
2638 let Predicates = [HasBMI, NoBMI2, NoTBM] in {
2639 def : Pat<(and GR64:$src, AndMask64:$mask),
2640 (BEXTR64rr GR64:$src,
2641 (SUBREG_TO_REG (i64 0),
2642 (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
2643 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2644 (BEXTR64rm addr:$src,
2645 (SUBREG_TO_REG (i64 0),
2646 (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
2649 // Use BZHI for 64-bit 'and' with large immediate 'mask'.
// BZHI's control is just the bit index, so an 8-bit immediate materialized
// into the low subreg suffices.
2650 let Predicates = [HasBMI2, NoTBM] in {
2651 def : Pat<(and GR64:$src, AndMask64:$mask),
2652 (BZHI64rr GR64:$src,
2653 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2654 (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
2655 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2656 (BZHI64rm addr:$src,
2657 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2658 (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
// PDEP/PEXT (BMI2): parallel bit deposit/extract, selected directly via
// the X86pdep/X86pext nodes. The two mnemonics share opcode 0xF5 and are
// distinguished by the mandatory prefix (T8XD vs T8XS) on the defm lines.
2661 multiclass bmi_pdep_pext<string mnemonic, RegisterClass RC,
2662 X86MemOperand x86memop, SDNode OpNode,
2664 def rr : I<0xF5, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2665 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2666 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>,
2667 VEX_4V, Sched<[WriteALU]>;
2668 def rm : I<0xF5, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2669 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2670 [(set RC:$dst, (OpNode RC:$src1, (ld_frag addr:$src2)))]>,
2671 VEX_4V, Sched<[WriteALU.Folded, WriteALU.ReadAfterFold]>;
2674 let Predicates = [HasBMI2] in {
2675 defm PDEP32 : bmi_pdep_pext<"pdep{l}", GR32, i32mem,
2676 X86pdep, loadi32>, T8XD;
2677 defm PDEP64 : bmi_pdep_pext<"pdep{q}", GR64, i64mem,
2678 X86pdep, loadi64>, T8XD, VEX_W;
2679 defm PEXT32 : bmi_pdep_pext<"pext{l}", GR32, i32mem,
2680 X86pext, loadi32>, T8XS;
2681 defm PEXT64 : bmi_pdep_pext<"pext{q}", GR64, i64mem,
2682 X86pext, loadi64>, T8XS, VEX_W;
2685 //===----------------------------------------------------------------------===//
// TBM (AMD Trailing Bit Manipulation) instructions; XOP-encoded.
2688 let Predicates = [HasTBM], Defs = [EFLAGS] in {
// BEXTRI: BEXTR with the control packed into a 32-bit immediate instead
// of a register, selected via X86bextri.
2690 multiclass tbm_bextri<bits<8> opc, RegisterClass RC, string OpcodeStr,
2691 X86MemOperand x86memop, PatFrag ld_frag,
2692 SDNode OpNode, Operand immtype,
2693 SDPatternOperator immoperator,
2694 X86FoldableSchedWrite Sched> {
2695 def ri : Ii32<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, immtype:$cntl),
2696 !strconcat(OpcodeStr,
2697 "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2698 [(set RC:$dst, (OpNode RC:$src1, immoperator:$cntl))]>,
2699 XOP, XOPA, Sched<[Sched]>;
2700 def mi : Ii32<opc, MRMSrcMem, (outs RC:$dst),
2701 (ins x86memop:$src1, immtype:$cntl),
2702 !strconcat(OpcodeStr,
2703 "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2704 [(set RC:$dst, (OpNode (ld_frag addr:$src1), immoperator:$cntl))]>,
2705 XOP, XOPA, Sched<[Sched.Folded]>;
2708 defm BEXTRI32 : tbm_bextri<0x10, GR32, "bextr{l}", i32mem, loadi32,
2709 X86bextri, i32imm, timm, WriteBEXTR>;
// The 64-bit form still carries a 32-bit (sign-extended) immediate.
2710 let ImmT = Imm32S in
2711 defm BEXTRI64 : tbm_bextri<0x10, GR64, "bextr{q}", i64mem, loadi64,
2712 X86bextri, i64i32imm,
2713 i64timmSExt32, WriteBEXTR>, VEX_W;
// Unary TBM ops (one source, result in dst); no patterns here — selection
// is done via the Pat<> records near the end of this file.
2715 multiclass tbm_binary_rm<bits<8> opc, Format FormReg, Format FormMem,
2716 RegisterClass RC, string OpcodeStr,
2717 X86MemOperand x86memop, X86FoldableSchedWrite Sched> {
2718 let hasSideEffects = 0 in {
2719 def rr : I<opc, FormReg, (outs RC:$dst), (ins RC:$src),
2720 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
2721 XOP_4V, XOP9, Sched<[Sched]>;
2723 def rm : I<opc, FormMem, (outs RC:$dst), (ins x86memop:$src),
2724 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
2725 XOP_4V, XOP9, Sched<[Sched.Folded]>;
// Instantiates the 32- and 64-bit variants of one TBM op; the opcode and
// the /r field (FormReg/FormMem) together identify the operation.
2729 multiclass tbm_binary_intr<bits<8> opc, string OpcodeStr,
2730 X86FoldableSchedWrite Sched,
2731 Format FormReg, Format FormMem> {
2732 defm NAME#32 : tbm_binary_rm<opc, FormReg, FormMem, GR32, OpcodeStr#"{l}",
2734 defm NAME#64 : tbm_binary_rm<opc, FormReg, FormMem, GR64, OpcodeStr#"{q}",
2735 i64mem, Sched>, VEX_W;
2738 defm BLCFILL : tbm_binary_intr<0x01, "blcfill", WriteALU, MRM1r, MRM1m>;
2739 defm BLCI : tbm_binary_intr<0x02, "blci", WriteALU, MRM6r, MRM6m>;
2740 defm BLCIC : tbm_binary_intr<0x01, "blcic", WriteALU, MRM5r, MRM5m>;
2741 defm BLCMSK : tbm_binary_intr<0x02, "blcmsk", WriteALU, MRM1r, MRM1m>;
2742 defm BLCS : tbm_binary_intr<0x01, "blcs", WriteALU, MRM3r, MRM3m>;
2743 defm BLSFILL : tbm_binary_intr<0x01, "blsfill", WriteALU, MRM2r, MRM2m>;
2744 defm BLSIC : tbm_binary_intr<0x01, "blsic", WriteALU, MRM6r, MRM6m>;
2745 defm T1MSKC : tbm_binary_intr<0x01, "t1mskc", WriteALU, MRM7r, MRM7m>;
2746 defm TZMSK : tbm_binary_intr<0x01, "tzmsk", WriteALU, MRM4r, MRM4m>;
2749 // Use BEXTRI for 64-bit 'and' with large immediate 'mask'.
// Preferred over the BMI/BMI2 expansions above because the control fits
// in BEXTRI's immediate — no extra register materialization needed.
2750 let Predicates = [HasTBM] in {
2751 def : Pat<(and GR64:$src, AndMask64:$mask),
2752 (BEXTRI64ri GR64:$src, (BEXTRMaskXForm imm:$mask))>;
2754 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2755 (BEXTRI64mi addr:$src, (BEXTRMaskXForm imm:$mask))>;
2758 //===----------------------------------------------------------------------===//
2759 // Lightweight Profiling Instructions
// AMD LWP: XOP-encoded. LLWPCB/SLWPCB set/read the LWP control block
// pointer; LWPINS/LWPVAL insert profiling events (LWPINS also reports a
// result in EFLAGS, as the X86lwpins pattern shows).
2761 let Predicates = [HasLWP], SchedRW = [WriteSystem] in {
2763 def LLWPCB : I<0x12, MRM0r, (outs), (ins GR32:$src), "llwpcb\t$src",
2764 [(int_x86_llwpcb GR32:$src)]>, XOP, XOP9;
2765 def SLWPCB : I<0x12, MRM1r, (outs GR32:$dst), (ins), "slwpcb\t$dst",
2766 [(set GR32:$dst, (int_x86_slwpcb))]>, XOP, XOP9;
2768 def LLWPCB64 : I<0x12, MRM0r, (outs), (ins GR64:$src), "llwpcb\t$src",
2769 [(int_x86_llwpcb GR64:$src)]>, XOP, XOP9, VEX_W;
2770 def SLWPCB64 : I<0x12, MRM1r, (outs GR64:$dst), (ins), "slwpcb\t$dst",
2771 [(set GR64:$dst, (int_x86_slwpcb))]>, XOP, XOP9, VEX_W;
2773 multiclass lwpins_intr<RegisterClass RC> {
2774 def rri : Ii32<0x12, MRM0r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2775 "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2776 [(set EFLAGS, (X86lwpins RC:$src0, GR32:$src1, timm:$cntl))]>,
2779 def rmi : Ii32<0x12, MRM0m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2780 "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2781 [(set EFLAGS, (X86lwpins RC:$src0, (loadi32 addr:$src1), timm:$cntl))]>,
2785 let Defs = [EFLAGS] in {
2786 defm LWPINS32 : lwpins_intr<GR32>;
2787 defm LWPINS64 : lwpins_intr<GR64>, VEX_W;
2790 multiclass lwpval_intr<RegisterClass RC, Intrinsic Int> {
2791 def rri : Ii32<0x12, MRM1r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2792 "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2793 [(Int RC:$src0, GR32:$src1, timm:$cntl)]>, XOP_4V, XOPA;
2795 def rmi : Ii32<0x12, MRM1m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2796 "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2797 [(Int RC:$src0, (loadi32 addr:$src1), timm:$cntl)]>,
2801 defm LWPVAL32 : lwpval_intr<GR32, int_x86_lwpval32>;
2802 defm LWPVAL64 : lwpval_intr<GR64, int_x86_lwpval64>, VEX_W;
2804 } // HasLWP, SchedRW
2806 //===----------------------------------------------------------------------===//
2807 // MONITORX/MWAITX Instructions
// MONITORX takes the address in EAX (32-bit) or RAX (64-bit), hence the
// two defs differing only in the Uses list and the mode predicate.
2809 let SchedRW = [ WriteSystem ] in {
2810 let Uses = [ EAX, ECX, EDX ] in
2811 def MONITORX32rrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
2812 TB, Requires<[ HasMWAITX, Not64BitMode ]>;
2813 let Uses = [ RAX, ECX, EDX ] in
2814 def MONITORX64rrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
2815 TB, Requires<[ HasMWAITX, In64BitMode ]>;
2817 let Uses = [ ECX, EAX, EBX ] in {
2818 def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
2819 []>, TB, Requires<[ HasMWAITX ]>;
// Assembler aliases accepting the explicit-operand spellings.
2823 def : InstAlias<"mwaitx\t{%eax, %ecx, %ebx|ebx, ecx, eax}", (MWAITXrrr)>,
2824 Requires<[ Not64BitMode ]>;
2825 def : InstAlias<"mwaitx\t{%rax, %rcx, %rbx|rbx, rcx, rax}", (MWAITXrrr)>,
2826 Requires<[ In64BitMode ]>;
2828 def : InstAlias<"monitorx\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORX32rrr)>,
2829 Requires<[ Not64BitMode ]>;
2830 def : InstAlias<"monitorx\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORX64rrr)>,
2831 Requires<[ In64BitMode ]>;
2833 //===----------------------------------------------------------------------===//
2834 // WAITPKG Instructions
// UMONITOR's address size follows the address-size attribute, hence the
// three AdSize variants. UMWAIT/TPAUSE take the TSC deadline implicitly in
// EDX:EAX and report via EFLAGS (see the Uses/Defs and patterns).
2836 let SchedRW = [WriteSystem] in {
2837 def UMONITOR16 : I<0xAE, MRM6r, (outs), (ins GR16:$src),
2838 "umonitor\t$src", [(int_x86_umonitor GR16:$src)]>,
2839 XS, AdSize16, Requires<[HasWAITPKG, Not64BitMode]>;
2840 def UMONITOR32 : I<0xAE, MRM6r, (outs), (ins GR32:$src),
2841 "umonitor\t$src", [(int_x86_umonitor GR32:$src)]>,
2842 XS, AdSize32, Requires<[HasWAITPKG]>;
2843 def UMONITOR64 : I<0xAE, MRM6r, (outs), (ins GR64:$src),
2844 "umonitor\t$src", [(int_x86_umonitor GR64:$src)]>,
2845 XS, AdSize64, Requires<[HasWAITPKG, In64BitMode]>;
2846 let Uses = [EAX, EDX], Defs = [EFLAGS] in {
2847 def UMWAIT : I<0xAE, MRM6r,
2848 (outs), (ins GR32orGR64:$src), "umwait\t$src",
2849 [(set EFLAGS, (X86umwait GR32orGR64:$src, EDX, EAX))]>,
2850 XD, Requires<[HasWAITPKG]>;
2851 def TPAUSE : I<0xAE, MRM6r,
2852 (outs), (ins GR32orGR64:$src), "tpause\t$src",
2853 [(set EFLAGS, (X86tpause GR32orGR64:$src, EDX, EAX))]>,
2854 PD, Requires<[HasWAITPKG]>, NotMemoryFoldable;
2858 //===----------------------------------------------------------------------===//
2859 // MOVDIRI - Move doubleword/quadword as direct store
// Selected only via the int_x86_directstore intrinsics (direct stores
// bypass ordinary store selection on purpose).
2861 let SchedRW = [WriteStore] in {
2862 def MOVDIRI32 : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2863 "movdiri\t{$src, $dst|$dst, $src}",
2864 [(int_x86_directstore32 addr:$dst, GR32:$src)]>,
2865 T8PS, Requires<[HasMOVDIRI]>;
2866 def MOVDIRI64 : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2867 "movdiri\t{$src, $dst|$dst, $src}",
2868 [(int_x86_directstore64 addr:$dst, GR64:$src)]>,
2869 T8PS, Requires<[In64BitMode, HasMOVDIRI]>;
2872 //===----------------------------------------------------------------------===//
2873 // MOVDIR64B - Move 64 bytes as direct store
// The destination register operand supplies the destination address; its
// width follows the address-size attribute (AdSize16/32/64). Note the
// 16-bit form carries no pattern — assembler support only.
2875 let SchedRW = [WriteStore] in {
2876 def MOVDIR64B16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2877 "movdir64b\t{$src, $dst|$dst, $src}", []>,
2878 T8PD, AdSize16, Requires<[HasMOVDIR64B, Not64BitMode]>;
2879 def MOVDIR64B32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2880 "movdir64b\t{$src, $dst|$dst, $src}",
2881 [(int_x86_movdir64b GR32:$dst, addr:$src)]>,
2882 T8PD, AdSize32, Requires<[HasMOVDIR64B]>;
2883 def MOVDIR64B64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2884 "movdir64b\t{$src, $dst|$dst, $src}",
2885 [(int_x86_movdir64b GR64:$dst, addr:$src)]>,
2886 T8PD, AdSize64, Requires<[HasMOVDIR64B, In64BitMode]>;
2889 //===----------------------------------------------------------------------===//
2890 // ENQCMD/S - Enqueue 64-byte command as user with 64-byte write atomicity
// ENQCMD vs ENQCMDS differ only in mandatory prefix (T8XD vs T8XS); both
// report success through EFLAGS via the X86enqcmd/X86enqcmds nodes. The
// dst register width follows the address-size attribute.
2892 let SchedRW = [WriteStore], Defs = [EFLAGS] in {
2893 def ENQCMD16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2894 "enqcmd\t{$src, $dst|$dst, $src}",
2895 [(set EFLAGS, (X86enqcmd GR16:$dst, addr:$src))]>,
2896 T8XD, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
2897 def ENQCMD32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2898 "enqcmd\t{$src, $dst|$dst, $src}",
2899 [(set EFLAGS, (X86enqcmd GR32:$dst, addr:$src))]>,
2900 T8XD, AdSize32, Requires<[HasENQCMD]>;
2901 def ENQCMD64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2902 "enqcmd\t{$src, $dst|$dst, $src}",
2903 [(set EFLAGS, (X86enqcmd GR64:$dst, addr:$src))]>,
2904 T8XD, AdSize64, Requires<[HasENQCMD, In64BitMode]>;
2906 def ENQCMDS16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2907 "enqcmds\t{$src, $dst|$dst, $src}",
2908 [(set EFLAGS, (X86enqcmds GR16:$dst, addr:$src))]>,
2909 T8XS, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
2910 def ENQCMDS32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2911 "enqcmds\t{$src, $dst|$dst, $src}",
2912 [(set EFLAGS, (X86enqcmds GR32:$dst, addr:$src))]>,
2913 T8XS, AdSize32, Requires<[HasENQCMD]>;
2914 def ENQCMDS64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2915 "enqcmds\t{$src, $dst|$dst, $src}",
2916 [(set EFLAGS, (X86enqcmds GR64:$dst, addr:$src))]>,
2917 T8XS, AdSize64, Requires<[HasENQCMD, In64BitMode]>;
2920 //===----------------------------------------------------------------------===//
2921 // CLZERO Instruction
// CLZERO takes its address implicitly (EAX/RAX per mode); the explicit
// register spelling is accepted via the InstAliases below.
2923 let SchedRW = [WriteLoad] in {
2925 def CLZERO32r : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
2926 TB, Requires<[HasCLZERO, Not64BitMode]>;
2928 def CLZERO64r : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
2929 TB, Requires<[HasCLZERO, In64BitMode]>;
2932 def : InstAlias<"clzero\t{%eax|eax}", (CLZERO32r)>, Requires<[Not64BitMode]>;
2933 def : InstAlias<"clzero\t{%rax|rax}", (CLZERO64r)>, Requires<[In64BitMode]>;
2935 //===----------------------------------------------------------------------===//
2936 // INVLPGB Instruction
// Implicit operands in EAX/RAX and EDX (see Uses); aliases provide the
// explicit-operand assembler spelling.
2939 let SchedRW = [WriteSystem] in {
2940 let Uses = [EAX, EDX] in
2941 def INVLPGB32 : I<0x01, MRM_FE, (outs), (ins),
2943 PS, Requires<[Not64BitMode]>;
2944 let Uses = [RAX, EDX] in
2945 def INVLPGB64 : I<0x01, MRM_FE, (outs), (ins),
2947 PS, Requires<[In64BitMode]>;
2950 def : InstAlias<"invlpgb\t{%eax, %edx|eax, edx}", (INVLPGB32)>, Requires<[Not64BitMode]>;
2951 def : InstAlias<"invlpgb\t{%rax, %edx|rax, edx}", (INVLPGB64)>, Requires<[In64BitMode]>;
2953 //===----------------------------------------------------------------------===//
2954 // TLBSYNC Instruction
2957 let SchedRW = [WriteSystem] in {
2958 def TLBSYNC : I<0x01, MRM_FF, (outs), (ins),
2963 //===----------------------------------------------------------------------===//
2964 // HRESET Instruction
// HRESET implicitly reads EAX in addition to its immediate operand.
2966 let Uses = [EAX], SchedRW = [WriteSystem] in
2967 def HRESET : Ii8<0xF0, MRM_C0, (outs), (ins i32u8imm:$imm), "hreset\t$imm", []>,
2968 Requires<[HasHRESET]>, TAXS;
2970 //===----------------------------------------------------------------------===//
2971 // SERIALIZE Instruction
2973 let SchedRW = [WriteSystem] in
2974 def SERIALIZE : I<0x01, MRM_E8, (outs), (ins), "serialize",
2975 [(int_x86_serialize)]>, PS,
2976 Requires<[HasSERIALIZE]>;
2978 //===----------------------------------------------------------------------===//
2979 // TSXLDTRK - TSX Suspend Load Address Tracking
// Note XSUSLDTRK shares opcode/ModRM (0x01 /E8) with SERIALIZE above;
// they are disambiguated by the mandatory prefix (XD vs PS).
2981 let Predicates = [HasTSXLDTRK], SchedRW = [WriteSystem] in {
2982 def XSUSLDTRK : I<0x01, MRM_E8, (outs), (ins), "xsusldtrk",
2983 [(int_x86_xsusldtrk)]>, XD;
2984 def XRESLDTRK : I<0x01, MRM_E9, (outs), (ins), "xresldtrk",
2985 [(int_x86_xresldtrk)]>, XD;
2988 //===----------------------------------------------------------------------===//
2989 // UINTR Instructions
// User-interrupt instructions; 64-bit mode only. TESTUI returns its
// result through EFLAGS via the X86testui node.
2991 let Predicates = [HasUINTR, In64BitMode], SchedRW = [WriteSystem] in {
2992 def UIRET : I<0x01, MRM_EC, (outs), (ins), "uiret",
2994 def CLUI : I<0x01, MRM_EE, (outs), (ins), "clui",
2995 [(int_x86_clui)]>, XS;
2996 def STUI : I<0x01, MRM_EF, (outs), (ins), "stui",
2997 [(int_x86_stui)]>, XS;
2999 def SENDUIPI : I<0xC7, MRM6r, (outs), (ins GR64:$arg), "senduipi\t$arg",
3000 [(int_x86_senduipi GR64:$arg)]>, XS;
3002 let Defs = [EFLAGS] in
3003 def TESTUI : I<0x01, MRM_ED, (outs), (ins), "testui",
3004 [(set EFLAGS, (X86testui))]>, XS;
3007 //===----------------------------------------------------------------------===//
3008 // Pattern fragments to auto generate TBM instructions.
3009 //===----------------------------------------------------------------------===//
// Selection patterns for the unary TBM idioms. For reference:
//   BLCFILL = x & (x+1)      BLCI  = x | ~(x+1)     BLCIC = ~x & (x+1)
//   BLCMSK  = x ^ (x+1)      BLCS  = x | (x+1)      BLSFILL = x | (x-1)
//   BLSIC   = ~x | (x-1)     T1MSKC = ~x | (x+1)    TZMSK = ~x & (x-1)
// Each identity below is the LHS of its Pat<> record. The second half of
// this block repeats the patterns for the flag-producing op fragments.
3011 let Predicates = [HasTBM] in {
3012 // FIXME: patterns for the load versions are not implemented
3013 def : Pat<(and GR32:$src, (add GR32:$src, 1)),
3014 (BLCFILL32rr GR32:$src)>;
3015 def : Pat<(and GR64:$src, (add GR64:$src, 1)),
3016 (BLCFILL64rr GR64:$src)>;
3018 def : Pat<(or GR32:$src, (not (add GR32:$src, 1))),
3019 (BLCI32rr GR32:$src)>;
3020 def : Pat<(or GR64:$src, (not (add GR64:$src, 1))),
3021 (BLCI64rr GR64:$src)>;
3023 // Extra patterns because opt can optimize the above patterns to this.
// ~(x+1) == -2 - x, so the canonicalized form is matched too.
3024 def : Pat<(or GR32:$src, (sub -2, GR32:$src)),
3025 (BLCI32rr GR32:$src)>;
3026 def : Pat<(or GR64:$src, (sub -2, GR64:$src)),
3027 (BLCI64rr GR64:$src)>;
3029 def : Pat<(and (not GR32:$src), (add GR32:$src, 1)),
3030 (BLCIC32rr GR32:$src)>;
3031 def : Pat<(and (not GR64:$src), (add GR64:$src, 1)),
3032 (BLCIC64rr GR64:$src)>;
3034 def : Pat<(xor GR32:$src, (add GR32:$src, 1)),
3035 (BLCMSK32rr GR32:$src)>;
3036 def : Pat<(xor GR64:$src, (add GR64:$src, 1)),
3037 (BLCMSK64rr GR64:$src)>;
3039 def : Pat<(or GR32:$src, (add GR32:$src, 1)),
3040 (BLCS32rr GR32:$src)>;
3041 def : Pat<(or GR64:$src, (add GR64:$src, 1)),
3042 (BLCS64rr GR64:$src)>;
3044 def : Pat<(or GR32:$src, (add GR32:$src, -1)),
3045 (BLSFILL32rr GR32:$src)>;
3046 def : Pat<(or GR64:$src, (add GR64:$src, -1)),
3047 (BLSFILL64rr GR64:$src)>;
3049 def : Pat<(or (not GR32:$src), (add GR32:$src, -1)),
3050 (BLSIC32rr GR32:$src)>;
3051 def : Pat<(or (not GR64:$src), (add GR64:$src, -1)),
3052 (BLSIC64rr GR64:$src)>;
3054 def : Pat<(or (not GR32:$src), (add GR32:$src, 1)),
3055 (T1MSKC32rr GR32:$src)>;
3056 def : Pat<(or (not GR64:$src), (add GR64:$src, 1)),
3057 (T1MSKC64rr GR64:$src)>;
3059 def : Pat<(and (not GR32:$src), (add GR32:$src, -1)),
3060 (TZMSK32rr GR32:$src)>;
3061 def : Pat<(and (not GR64:$src), (add GR64:$src, -1)),
3062 (TZMSK64rr GR64:$src)>;
3064 // Patterns to match flag producing ops.
// Same identities as above, using the *_flag_nocf fragments (defined
// earlier in this file) so the flag result can be reused when no user
// reads the carry flag.
3065 def : Pat<(and_flag_nocf GR32:$src, (add GR32:$src, 1)),
3066 (BLCFILL32rr GR32:$src)>;
3067 def : Pat<(and_flag_nocf GR64:$src, (add GR64:$src, 1)),
3068 (BLCFILL64rr GR64:$src)>;
3070 def : Pat<(or_flag_nocf GR32:$src, (not (add GR32:$src, 1))),
3071 (BLCI32rr GR32:$src)>;
3072 def : Pat<(or_flag_nocf GR64:$src, (not (add GR64:$src, 1))),
3073 (BLCI64rr GR64:$src)>;
3075 // Extra patterns because opt can optimize the above patterns to this.
3076 def : Pat<(or_flag_nocf GR32:$src, (sub -2, GR32:$src)),
3077 (BLCI32rr GR32:$src)>;
3078 def : Pat<(or_flag_nocf GR64:$src, (sub -2, GR64:$src)),
3079 (BLCI64rr GR64:$src)>;
3081 def : Pat<(and_flag_nocf (not GR32:$src), (add GR32:$src, 1)),
3082 (BLCIC32rr GR32:$src)>;
3083 def : Pat<(and_flag_nocf (not GR64:$src), (add GR64:$src, 1)),
3084 (BLCIC64rr GR64:$src)>;
3086 def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, 1)),
3087 (BLCMSK32rr GR32:$src)>;
3088 def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, 1)),
3089 (BLCMSK64rr GR64:$src)>;
3091 def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, 1)),
3092 (BLCS32rr GR32:$src)>;
3093 def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, 1)),
3094 (BLCS64rr GR64:$src)>;
3096 def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, -1)),
3097 (BLSFILL32rr GR32:$src)>;
3098 def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, -1)),
3099 (BLSFILL64rr GR64:$src)>;
3101 def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, -1)),
3102 (BLSIC32rr GR32:$src)>;
3103 def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, -1)),
3104 (BLSIC64rr GR64:$src)>;
3106 def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, 1)),
3107 (T1MSKC32rr GR32:$src)>;
3108 def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, 1)),
3109 (T1MSKC64rr GR64:$src)>;
3111 def : Pat<(and_flag_nocf (not GR32:$src), (add GR32:$src, -1)),
3112 (TZMSK32rr GR32:$src)>;
3113 def : Pat<(and_flag_nocf (not GR64:$src), (add GR64:$src, -1)),
3114 (TZMSK64rr GR64:$src)>;
//===----------------------------------------------------------------------===//
// Memory Instructions
//===----------------------------------------------------------------------===//

// clflushopt: optimized cache-line flush, gated on the CLFLUSHOPT feature.
// NOTE(review): all three defs below schedule as WriteLoad even though they
// operate on a cache line rather than loading a value -- confirm intended.
let Predicates = [HasCLFLUSHOPT], SchedRW = [WriteLoad] in
def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
                   "clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, PD;

// clwb: cache-line write-back without invalidation.
let Predicates = [HasCLWB], SchedRW = [WriteLoad] in
def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src",
             [(int_x86_clwb addr:$src)]>, PD, NotMemoryFoldable;

// cldemote: hint to demote a cache line to a more distant cache level.
let Predicates = [HasCLDEMOTE], SchedRW = [WriteLoad] in
def CLDEMOTE : I<0x1C, MRM0m, (outs), (ins i8mem:$src), "cldemote\t$src",
                 [(int_x86_cldemote addr:$src)]>, PS;
//===----------------------------------------------------------------------===//
// Subsystems.
//===----------------------------------------------------------------------===//

include "X86InstrArithmetic.td"
include "X86InstrCMovSetCC.td"
include "X86InstrExtension.td"
include "X86InstrControl.td"
include "X86InstrShiftRotate.td"

// X87 Floating Point Stack.
include "X86InstrFPStack.td"

// SIMD support (SSE, MMX and AVX)
include "X86InstrFragmentsSIMD.td"

// FMA - Fused Multiply-Add support (requires FMA)
include "X86InstrFMA.td"

// XOP instructions.
include "X86InstrXOP.td"

// SSE, MMX and 3DNow! vector support.
include "X86InstrSSE.td"
include "X86InstrAVX512.td"
include "X86InstrMMX.td"
include "X86Instr3DNow.td"

// MPX instructions.
include "X86InstrMPX.td"

// Virtualization instructions (VMX, SVM, SNP).
include "X86InstrVMX.td"
include "X86InstrSVM.td"
include "X86InstrSNP.td"

// TSX and SGX instructions.
include "X86InstrTSX.td"
include "X86InstrSGX.td"

// TDX instructions.
include "X86InstrTDX.td"

// Key Locker instructions
include "X86InstrKL.td"

// AMX instructions.
include "X86InstrAMX.td"

// System instructions.
include "X86InstrSystem.td"

// Compiler Pseudo Instructions and Pat Patterns
include "X86InstrCompiler.td"
include "X86InstrVecCompiler.td"
//===----------------------------------------------------------------------===//
// Assembler Mnemonic Aliases
//===----------------------------------------------------------------------===//

// In AT&T syntax a bare "call" picks up the pointer-size suffix of the
// current mode.
def : MnemonicAlias<"call", "callw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"call", "calll", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"call", "callq", "att">, Requires<[In64BitMode]>;

// Intel sign-extension mnemonics mapped onto the AT&T spellings.
def : MnemonicAlias<"cbw", "cbtw", "att">;
def : MnemonicAlias<"cwde", "cwtl", "att">;
def : MnemonicAlias<"cwd", "cwtd", "att">;
def : MnemonicAlias<"cdq", "cltd", "att">;
def : MnemonicAlias<"cdqe", "cltq", "att">;
def : MnemonicAlias<"cqo", "cqto", "att">;

// In 64-bit mode lret maps to lretl; it is not ambiguous with lretq.
def : MnemonicAlias<"lret", "lretw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lret", "lretl", "att">, Requires<[Not16BitMode]>;

def : MnemonicAlias<"leavel", "leave", "att">, Requires<[Not64BitMode]>;
def : MnemonicAlias<"leaveq", "leave", "att">, Requires<[In64BitMode]>;

def : MnemonicAlias<"loopz", "loope">;
def : MnemonicAlias<"loopnz", "loopne">;

def : MnemonicAlias<"pop", "popw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pop", "popl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pop", "popq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"popf", "popfw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"popf", "popfl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"popf", "popfq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"popf", "popfq", "intel">, Requires<[In64BitMode]>;
def : MnemonicAlias<"popfd", "popfl", "att">;
def : MnemonicAlias<"popfw", "popf", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"popfw", "popf", "intel">, Requires<[In64BitMode]>;

// FIXME: This is wrong for "push reg". "push %bx" should turn into pushw in
// all modes. However: "push (addr)" and "push $42" should default to
// pushl/pushq depending on the current mode. Similar for "pop %bx"
def : MnemonicAlias<"push", "pushw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"push", "pushl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"push", "pushq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"pushf", "pushfw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pushf", "pushfl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"pushf", "pushfq", "intel">, Requires<[In64BitMode]>;
def : MnemonicAlias<"pushfd", "pushfl", "att">;
def : MnemonicAlias<"pushfw", "pushf", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pushfw", "pushf", "intel">, Requires<[In64BitMode]>;

def : MnemonicAlias<"popad", "popal", "intel">, Requires<[Not64BitMode]>;
def : MnemonicAlias<"pushad", "pushal", "intel">, Requires<[Not64BitMode]>;
def : MnemonicAlias<"popa", "popaw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pusha", "pushaw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"popa", "popal", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pusha", "pushal", "intel">, Requires<[In32BitMode]>;

def : MnemonicAlias<"popa", "popaw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pusha", "pushaw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"popa", "popal", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pusha", "pushal", "att">, Requires<[In32BitMode]>;

def : MnemonicAlias<"repe", "rep">;
def : MnemonicAlias<"repz", "rep">;
def : MnemonicAlias<"repnz", "repne">;

def : MnemonicAlias<"ret", "retw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"ret", "retl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"ret", "retq", "att">, Requires<[In64BitMode]>;

// Apply 'ret' behavior to 'retn'
def : MnemonicAlias<"retn", "retw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"retn", "retl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"retn", "retq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"retn", "ret", "intel">;
// "sal" is a synonym for "shl" (identical encoding).
def : MnemonicAlias<"sal", "shl", "intel">;
def : MnemonicAlias<"salb", "shlb", "att">;
def : MnemonicAlias<"salw", "shlw", "att">;
def : MnemonicAlias<"sall", "shll", "att">;
def : MnemonicAlias<"salq", "shlq", "att">;

def : MnemonicAlias<"smovb", "movsb", "att">;
def : MnemonicAlias<"smovw", "movsw", "att">;
def : MnemonicAlias<"smovl", "movsl", "att">;
def : MnemonicAlias<"smovq", "movsq", "att">;

def : MnemonicAlias<"ud2a", "ud2", "att">;
def : MnemonicAlias<"ud2bw", "ud1w", "att">;
def : MnemonicAlias<"ud2bl", "ud1l", "att">;
def : MnemonicAlias<"ud2bq", "ud1q", "att">;
def : MnemonicAlias<"verrw", "verr", "att">;

// MS recognizes 'xacquire'/'xrelease' as 'acquire'/'release'
def : MnemonicAlias<"acquire", "xacquire", "intel">;
def : MnemonicAlias<"release", "xrelease", "intel">;

// System instruction aliases.
def : MnemonicAlias<"iret", "iretw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"iret", "iretl", "att">, Requires<[Not16BitMode]>;
def : MnemonicAlias<"sysret", "sysretl", "att">;
def : MnemonicAlias<"sysexit", "sysexitl", "att">;

// Descriptor-table instructions take a mode-dependent suffix in AT&T syntax
// and a w/d suffix in Intel syntax.
def : MnemonicAlias<"lgdt", "lgdtw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lgdt", "lgdtl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"lgdt", "lgdtq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"lidt", "lidtw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lidt", "lidtl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"lidt", "lidtq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"sidt", "sidtw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"sidt", "sidtl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sidt", "sidtq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"lgdt", "lgdtw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lgdt", "lgdtd", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"lidt", "lidtw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lidt", "lidtd", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtd", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sidt", "sidtw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"sidt", "sidtd", "intel">, Requires<[In32BitMode]>;

// Floating point stack aliases.
def : MnemonicAlias<"fcmovz", "fcmove", "att">;
def : MnemonicAlias<"fcmova", "fcmovnbe", "att">;
def : MnemonicAlias<"fcmovnae", "fcmovb", "att">;
def : MnemonicAlias<"fcmovna", "fcmovbe", "att">;
def : MnemonicAlias<"fcmovae", "fcmovnb", "att">;
def : MnemonicAlias<"fcomip", "fcompi">;
def : MnemonicAlias<"fildq", "fildll", "att">;
def : MnemonicAlias<"fistpq", "fistpll", "att">;
def : MnemonicAlias<"fisttpq", "fisttpll", "att">;
def : MnemonicAlias<"fldcww", "fldcw", "att">;
def : MnemonicAlias<"fnstcww", "fnstcw", "att">;
def : MnemonicAlias<"fnstsww", "fnstsw", "att">;
def : MnemonicAlias<"fucomip", "fucompi">;
def : MnemonicAlias<"fwait", "wait">;

// 64-bit forms of the state-save family accept a 'q' suffix in AT&T syntax.
def : MnemonicAlias<"fxsaveq", "fxsave64", "att">;
def : MnemonicAlias<"fxrstorq", "fxrstor64", "att">;
def : MnemonicAlias<"xsaveq", "xsave64", "att">;
def : MnemonicAlias<"xrstorq", "xrstor64", "att">;
def : MnemonicAlias<"xsaveoptq", "xsaveopt64", "att">;
def : MnemonicAlias<"xrstorsq", "xrstors64", "att">;
def : MnemonicAlias<"xsavecq", "xsavec64", "att">;
def : MnemonicAlias<"xsavesq", "xsaves64", "att">;
// CondCodeAlias - Rename one condition-code spelling inside a mnemonic
// (Prefix ## OldCond ## Suffix -> Prefix ## NewCond ## Suffix) for the given
// asm variant ("att", "intel", or "" for both).
// Fix: the template parameter list was truncated -- VariantName was used in
// the base-class args but never declared.  Declared with a "" default so
// two-variant uses stay valid.
class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond,
                    string VariantName = "">
  : MnemonicAlias<!strconcat(Prefix, OldCond, Suffix),
                  !strconcat(Prefix, NewCond, Suffix), VariantName>;
/// IntegerCondCodeMnemonicAlias - This multiclass defines a bunch of
/// MnemonicAlias's that canonicalize the condition code in a mnemonic, for
/// example "setz" -> "sete".
// Fix: the multiclass header was truncated -- the variant parameter V was
// used throughout the body but never declared, and the braces delimiting the
// body were missing (the following defm would otherwise be a syntax error).
multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix,
                                        string V = ""> {
  def C   : CondCodeAlias<Prefix, Suffix, "c",   "b",  V>; // setc   -> setb
  def Z   : CondCodeAlias<Prefix, Suffix, "z" ,  "e",  V>; // setz   -> sete
  def NA  : CondCodeAlias<Prefix, Suffix, "na",  "be", V>; // setna  -> setbe
  def NB  : CondCodeAlias<Prefix, Suffix, "nb",  "ae", V>; // setnb  -> setae
  def NC  : CondCodeAlias<Prefix, Suffix, "nc",  "ae", V>; // setnc  -> setae
  def NG  : CondCodeAlias<Prefix, Suffix, "ng",  "le", V>; // setng  -> setle
  def NL  : CondCodeAlias<Prefix, Suffix, "nl",  "ge", V>; // setnl  -> setge
  def NZ  : CondCodeAlias<Prefix, Suffix, "nz",  "ne", V>; // setnz  -> setne
  def PE  : CondCodeAlias<Prefix, Suffix, "pe",  "p",  V>; // setpe  -> setp
  def PO  : CondCodeAlias<Prefix, Suffix, "po",  "np", V>; // setpo  -> setnp

  def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b",  V>; // setnae -> setb
  def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a",  V>; // setnbe -> seta
  def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l",  V>; // setnge -> setl
  def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g",  V>; // setnle -> setg
}
// Instantiate the condition-code canonicalizations for each conditional
// mnemonic family.
// Aliases for set<CC>
defm : IntegerCondCodeMnemonicAlias<"set", "">;
// Aliases for j<CC>
defm : IntegerCondCodeMnemonicAlias<"j", "">;
// Aliases for cmov<CC>{w,l,q}
defm : IntegerCondCodeMnemonicAlias<"cmov", "w", "att">;
defm : IntegerCondCodeMnemonicAlias<"cmov", "l", "att">;
defm : IntegerCondCodeMnemonicAlias<"cmov", "q", "att">;
// No size suffix for intel-style asm.
defm : IntegerCondCodeMnemonicAlias<"cmov", "", "intel">;
//===----------------------------------------------------------------------===//
// Assembler Instruction Aliases
//===----------------------------------------------------------------------===//

// aad/aam default to base 10 if no operand is specified.
def : InstAlias<"aad", (AAD8i8 10)>, Requires<[Not64BitMode]>;
def : InstAlias<"aam", (AAM8i8 10)>, Requires<[Not64BitMode]>;

// Disambiguate the mem/imm form of bt-without-a-suffix as btl.
// Likewise for btc/btr/bts.
def : InstAlias<"bt\t{$imm, $mem|$mem, $imm}",
                (BT32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
def : InstAlias<"btc\t{$imm, $mem|$mem, $imm}",
                (BTC32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
def : InstAlias<"btr\t{$imm, $mem|$mem, $imm}",
                (BTR32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
def : InstAlias<"bts\t{$imm, $mem|$mem, $imm}",
                (BTS32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;

// clr aliases: "clr <reg>" is xor of the register with itself.
def : InstAlias<"clr{b}\t$reg", (XOR8rr GR8 :$reg, GR8 :$reg), 0>;
def : InstAlias<"clr{w}\t$reg", (XOR16rr GR16:$reg, GR16:$reg), 0>;
def : InstAlias<"clr{l}\t$reg", (XOR32rr GR32:$reg, GR32:$reg), 0>;
def : InstAlias<"clr{q}\t$reg", (XOR64rr GR64:$reg, GR64:$reg), 0>;
// lods aliases. Accept the destination being omitted because it's implicit
// in the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"lodsb\t$src", (LODSB srcidx8:$src), 0>;
def : InstAlias<"lodsw\t$src", (LODSW srcidx16:$src), 0>;
def : InstAlias<"lods{l|d}\t$src", (LODSL srcidx32:$src), 0>;
def : InstAlias<"lodsq\t$src", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
def : InstAlias<"lods\t{$src, %al|al, $src}", (LODSB srcidx8:$src), 0>;
def : InstAlias<"lods\t{$src, %ax|ax, $src}", (LODSW srcidx16:$src), 0>;
def : InstAlias<"lods\t{$src, %eax|eax, $src}", (LODSL srcidx32:$src), 0>;
def : InstAlias<"lods\t{$src, %rax|rax, $src}", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
// Intel syntax infers the size from the memory operand alone.
def : InstAlias<"lods\t$src", (LODSB srcidx8:$src), 0, "intel">;
def : InstAlias<"lods\t$src", (LODSW srcidx16:$src), 0, "intel">;
def : InstAlias<"lods\t$src", (LODSL srcidx32:$src), 0, "intel">;
def : InstAlias<"lods\t$src", (LODSQ srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
// stos aliases. Accept the source being omitted because it's implicit in
// the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"stosb\t$dst", (STOSB dstidx8:$dst), 0>;
def : InstAlias<"stosw\t$dst", (STOSW dstidx16:$dst), 0>;
def : InstAlias<"stos{l|d}\t$dst", (STOSL dstidx32:$dst), 0>;
def : InstAlias<"stosq\t$dst", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"stos\t{%al, $dst|$dst, al}", (STOSB dstidx8:$dst), 0>;
def : InstAlias<"stos\t{%ax, $dst|$dst, ax}", (STOSW dstidx16:$dst), 0>;
def : InstAlias<"stos\t{%eax, $dst|$dst, eax}", (STOSL dstidx32:$dst), 0>;
def : InstAlias<"stos\t{%rax, $dst|$dst, rax}", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
// Intel syntax infers the size from the memory operand alone.
def : InstAlias<"stos\t$dst", (STOSB dstidx8:$dst), 0, "intel">;
def : InstAlias<"stos\t$dst", (STOSW dstidx16:$dst), 0, "intel">;
def : InstAlias<"stos\t$dst", (STOSL dstidx32:$dst), 0, "intel">;
def : InstAlias<"stos\t$dst", (STOSQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;
// scas aliases. Accept the destination being omitted because it's implicit
// in the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"scasb\t$dst", (SCASB dstidx8:$dst), 0>;
def : InstAlias<"scasw\t$dst", (SCASW dstidx16:$dst), 0>;
def : InstAlias<"scas{l|d}\t$dst", (SCASL dstidx32:$dst), 0>;
def : InstAlias<"scasq\t$dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"scas\t{$dst, %al|al, $dst}", (SCASB dstidx8:$dst), 0>;
def : InstAlias<"scas\t{$dst, %ax|ax, $dst}", (SCASW dstidx16:$dst), 0>;
def : InstAlias<"scas\t{$dst, %eax|eax, $dst}", (SCASL dstidx32:$dst), 0>;
def : InstAlias<"scas\t{$dst, %rax|rax, $dst}", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
// Intel syntax infers the size from the memory operand alone.
def : InstAlias<"scas\t$dst", (SCASB dstidx8:$dst), 0, "intel">;
def : InstAlias<"scas\t$dst", (SCASW dstidx16:$dst), 0, "intel">;
def : InstAlias<"scas\t$dst", (SCASL dstidx32:$dst), 0, "intel">;
def : InstAlias<"scas\t$dst", (SCASQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;
// cmps aliases. Mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSB dstidx8:$dst, srcidx8:$src), 0, "intel">;
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;

// movs aliases. Mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSB dstidx8:$dst, srcidx8:$src), 0, "intel">;
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
// div and idiv aliases for explicit A register.
// The explicit accumulator operand is redundant (it is implicit in the
// one-operand instruction), so these map onto the plain r/m forms.
def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r  GR8 :$src)>;
def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16r GR16:$src)>;
def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32r GR32:$src)>;
def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64r GR64:$src)>;
def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8m  i8mem :$src)>;
def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16m i16mem:$src)>;
def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32m i32mem:$src)>;
def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64m i64mem:$src)>;
def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8r  GR8 :$src)>;
def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16r GR16:$src)>;
def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32r GR32:$src)>;
def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64r GR64:$src)>;
def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8m  i8mem :$src)>;
def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16m i16mem:$src)>;
def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32m i32mem:$src)>;
def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64m i64mem:$src)>;
// Various unary fpstack operations default to operating on ST1.
// For example, "fxch" -> "fxch %st(1)"
def : InstAlias<"faddp", (ADD_FPrST0 ST1), 0>;
// NOTE(review): bare "fadd" maps to the same pop form (ADD_FPrST0) as
// "faddp" above -- confirm this matches gas behavior.
// Fix: normalized "def:" to "def :" for consistency with every other
// definition in this file (whitespace only, no semantic change).
def : InstAlias<"fadd", (ADD_FPrST0 ST1), 0>;
def : InstAlias<"fsub{|r}p", (SUBR_FPrST0 ST1), 0>;
def : InstAlias<"fsub{r|}p", (SUB_FPrST0 ST1), 0>;
def : InstAlias<"fmul", (MUL_FPrST0 ST1), 0>;
def : InstAlias<"fmulp", (MUL_FPrST0 ST1), 0>;
def : InstAlias<"fdiv{|r}p", (DIVR_FPrST0 ST1), 0>;
def : InstAlias<"fdiv{r|}p", (DIV_FPrST0 ST1), 0>;
def : InstAlias<"fxch", (XCH_F ST1), 0>;
def : InstAlias<"fcom", (COM_FST0r ST1), 0>;
def : InstAlias<"fcomp", (COMP_FST0r ST1), 0>;
def : InstAlias<"fcomi", (COM_FIr ST1), 0>;
def : InstAlias<"fcompi", (COM_FIPr ST1), 0>;
def : InstAlias<"fucom", (UCOM_Fr ST1), 0>;
def : InstAlias<"fucomp", (UCOM_FPr ST1), 0>;
def : InstAlias<"fucomi", (UCOM_FIr ST1), 0>;
def : InstAlias<"fucompi", (UCOM_FIPr ST1), 0>;
// Handle fmul/fadd/fsub/fdiv instructions with explicitly written st(0) op.
// For example, "fadd %st(4), %st(0)" -> "fadd %st(4)". We also disambiguate
// instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with
// gas.
// Fix: the multiclass body was never closed -- the brace has been restored so
// the defm instantiations that follow parse (the truncated trailing comment
// sentence is completed above as well).
multiclass FpUnaryAlias<string Mnemonic, Instruction Inst, bit EmitAlias = 1> {
  def : InstAlias<!strconcat(Mnemonic, "\t$op"),
                  (Inst RSTi:$op), EmitAlias>;
  def : InstAlias<!strconcat(Mnemonic, "\t{%st, %st|st, st}"),
                  (Inst ST0), EmitAlias>;
}
// Instantiate the explicit-st(0) aliases for each two-operand FP operation.
// The 0 suppresses emission (parse-only aliases).
defm : FpUnaryAlias<"fadd", ADD_FST0r, 0>;
defm : FpUnaryAlias<"faddp", ADD_FPrST0, 0>;
defm : FpUnaryAlias<"fsub", SUB_FST0r, 0>;
defm : FpUnaryAlias<"fsub{|r}p", SUBR_FPrST0, 0>;
defm : FpUnaryAlias<"fsubr", SUBR_FST0r, 0>;
defm : FpUnaryAlias<"fsub{r|}p", SUB_FPrST0, 0>;
defm : FpUnaryAlias<"fmul", MUL_FST0r, 0>;
defm : FpUnaryAlias<"fmulp", MUL_FPrST0, 0>;
defm : FpUnaryAlias<"fdiv", DIV_FST0r, 0>;
defm : FpUnaryAlias<"fdiv{|r}p", DIVR_FPrST0, 0>;
defm : FpUnaryAlias<"fdivr", DIVR_FST0r, 0>;
defm : FpUnaryAlias<"fdiv{r|}p", DIV_FPrST0, 0>;
defm : FpUnaryAlias<"fcomi", COM_FIr, 0>;
defm : FpUnaryAlias<"fucomi", UCOM_FIr, 0>;
defm : FpUnaryAlias<"fcompi", COM_FIPr, 0>;
defm : FpUnaryAlias<"fucompi", UCOM_FIPr, 0>;
// Handle "f{mulp,addp} $op, %st(0)" the same as "f{mulp,addp} $op", since they
// commute. We also allow fdiv[r]p/fsubrp even though they don't commute,
// solely because gas supports it.
def : InstAlias<"faddp\t{$op, %st|st, $op}", (ADD_FPrST0 RSTi:$op), 0>;
def : InstAlias<"fmulp\t{$op, %st|st, $op}", (MUL_FPrST0 RSTi:$op), 0>;
def : InstAlias<"fsub{|r}p\t{$op, %st|st, $op}", (SUBR_FPrST0 RSTi:$op), 0>;
def : InstAlias<"fsub{r|}p\t{$op, %st|st, $op}", (SUB_FPrST0 RSTi:$op), 0>;
def : InstAlias<"fdiv{|r}p\t{$op, %st|st, $op}", (DIVR_FPrST0 RSTi:$op), 0>;
def : InstAlias<"fdiv{r|}p\t{$op, %st|st, $op}", (DIV_FPrST0 RSTi:$op), 0>;

// Bare "fnstsw" stores to AX (the register form).
def : InstAlias<"fnstsw" , (FNSTSW16r), 0>;
// lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
// this is compatible with what GAS does.
def : InstAlias<"lcall\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
def : InstAlias<"ljmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
def : InstAlias<"lcall\t{*}$dst", (FARCALL32m opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"ljmp\t{*}$dst", (FARJMP32m opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"lcall\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
def : InstAlias<"ljmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
def : InstAlias<"lcall\t{*}$dst", (FARCALL16m opaquemem:$dst), 0>, Requires<[In16BitMode]>;
def : InstAlias<"ljmp\t{*}$dst", (FARJMP16m opaquemem:$dst), 0>, Requires<[In16BitMode]>;

// Indirect "jmp *<mem>" picks the operand size of the current mode.
def : InstAlias<"jmp\t{*}$dst", (JMP64m i64mem:$dst), 0, "att">, Requires<[In64BitMode]>;
def : InstAlias<"jmp\t{*}$dst", (JMP32m i32mem:$dst), 0, "att">, Requires<[In32BitMode]>;
def : InstAlias<"jmp\t{*}$dst", (JMP16m i16mem:$dst), 0, "att">, Requires<[In16BitMode]>;
// "imul <imm>, B" is an alias for "imul <imm>, B, B".
def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm), 0>;
def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm), 0>;
def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri GR32:$r, GR32:$r, i32imm:$imm), 0>;
def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm), 0>;
def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri32 GR64:$r, GR64:$r, i64i32imm:$imm), 0>;
def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm), 0>;
// ins aliases. Accept the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSB dstidx8:$dst), 0, "intel">;
def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSW dstidx16:$dst), 0, "intel">;
def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSL dstidx32:$dst), 0, "intel">;

// outs aliases. Accept the mnemonic suffix being omitted because it's implicit
// in the source.
def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSB srcidx8:$src), 0, "intel">;
def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSW srcidx16:$src), 0, "intel">;
def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSL srcidx32:$src), 0, "intel">;

// inb %dx -> inb %al, %dx
def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>;
def : InstAlias<"inl\t{%dx|dx}", (IN32rr), 0>;
def : InstAlias<"inb\t$port", (IN8ri u8imm:$port), 0>;
def : InstAlias<"inw\t$port", (IN16ri u8imm:$port), 0>;
def : InstAlias<"inl\t$port", (IN32ri u8imm:$port), 0>;
// jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp
def : InstAlias<"call\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
def : InstAlias<"jmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
def : InstAlias<"call\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
def : InstAlias<"jmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
def : InstAlias<"callw\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
def : InstAlias<"jmpw\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
def : InstAlias<"calll\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
def : InstAlias<"jmpl\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
// Match 'movq <largeimm>, <reg>' as an alias for movabsq.
def : InstAlias<"mov{q}\t{$imm, $reg|$reg, $imm}", (MOV64ri GR64:$reg, i64imm:$imm), 0>;

// Match 'movd GR64, MMX' as an alias for movq to be compatible with gas,
// which supports this due to an old AMD documentation bug when 64-bit mode was
// added.
def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
                (MMX_MOVD64to64rr VR64:$dst, GR64:$src), 0>;
def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
                (MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0>;
// movsx aliases: suffix-less Intel-style movsx disambiguated by operands.
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rr8 GR16:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rm8 GR16:$dst, i8mem:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr8 GR32:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr16 GR32:$dst, GR16:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr8 GR64:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr16 GR64:$dst, GR16:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr32 GR64:$dst, GR32:$src), 0, "att">;

// movzx aliases, same scheme as movsx above.
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rr8 GR16:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rm8 GR16:$dst, i8mem:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr8 GR32:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr16 GR32:$dst, GR16:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr8 GR64:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr16 GR64:$dst, GR16:$src), 0, "att">;
// Note: No GR32->GR64 movzx form.
// outb %dx -> outb %al, %dx
def : InstAlias<"outb\t{%dx|dx}", (OUT8rr), 0>;
def : InstAlias<"outw\t{%dx|dx}", (OUT16rr), 0>;
def : InstAlias<"outl\t{%dx|dx}", (OUT32rr), 0>;
def : InstAlias<"outb\t$port", (OUT8ir u8imm:$port), 0>;
def : InstAlias<"outw\t$port", (OUT16ir u8imm:$port), 0>;
def : InstAlias<"outl\t$port", (OUT32ir u8imm:$port), 0>;

// 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
// effect (both store to a 16-bit mem). Force to sldtw to avoid ambiguity
// errors, since its encoding is the most compact.
def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem), 0>;
// shld/shrd op,op -> shld op, op, CL
def : InstAlias<"shld{w}\t{$r2, $r1|$r1, $r2}", (SHLD16rrCL GR16:$r1, GR16:$r2), 0>;
def : InstAlias<"shld{l}\t{$r2, $r1|$r1, $r2}", (SHLD32rrCL GR32:$r1, GR32:$r2), 0>;
def : InstAlias<"shld{q}\t{$r2, $r1|$r1, $r2}", (SHLD64rrCL GR64:$r1, GR64:$r2), 0>;
def : InstAlias<"shrd{w}\t{$r2, $r1|$r1, $r2}", (SHRD16rrCL GR16:$r1, GR16:$r2), 0>;
def : InstAlias<"shrd{l}\t{$r2, $r1|$r1, $r2}", (SHRD32rrCL GR32:$r1, GR32:$r2), 0>;
def : InstAlias<"shrd{q}\t{$r2, $r1|$r1, $r2}", (SHRD64rrCL GR64:$r1, GR64:$r2), 0>;

// Memory-destination forms of the same implicit-CL aliases.
def : InstAlias<"shld{w}\t{$reg, $mem|$mem, $reg}", (SHLD16mrCL i16mem:$mem, GR16:$reg), 0>;
def : InstAlias<"shld{l}\t{$reg, $mem|$mem, $reg}", (SHLD32mrCL i32mem:$mem, GR32:$reg), 0>;
def : InstAlias<"shld{q}\t{$reg, $mem|$mem, $reg}", (SHLD64mrCL i64mem:$mem, GR64:$reg), 0>;
def : InstAlias<"shrd{w}\t{$reg, $mem|$mem, $reg}", (SHRD16mrCL i16mem:$mem, GR16:$reg), 0>;
def : InstAlias<"shrd{l}\t{$reg, $mem|$mem, $reg}", (SHRD32mrCL i32mem:$mem, GR32:$reg), 0>;
def : InstAlias<"shrd{q}\t{$reg, $mem|$mem, $reg}", (SHRD64mrCL i64mem:$mem, GR64:$reg), 0>;
/* FIXME: This is disabled because the asm matcher is currently incapable of
 * matching a fixed immediate like $1.
// "shl X, $1" is an alias for "shl X".
multiclass ShiftRotateByOneAlias<string Mnemonic, string Opc> {
 def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "8r1")) GR8:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "16r1")) GR16:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "32r1")) GR32:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "64r1")) GR64:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "8m1")) i8mem:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "16m1")) i16mem:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "32m1")) i32mem:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "64m1")) i64mem:$op)>;
}

defm : ShiftRotateByOneAlias<"rcl", "RCL">;
defm : ShiftRotateByOneAlias<"rcr", "RCR">;
defm : ShiftRotateByOneAlias<"rol", "ROL">;
defm : ShiftRotateByOneAlias<"ror", "ROR">;
FIXME */
// test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
def : InstAlias<"test{b}\t{$mem, $val|$val, $mem}",
                (TEST8mr  i8mem :$mem, GR8 :$val), 0>;
def : InstAlias<"test{w}\t{$mem, $val|$val, $mem}",
                (TEST16mr i16mem:$mem, GR16:$val), 0>;
def : InstAlias<"test{l}\t{$mem, $val|$val, $mem}",
                (TEST32mr i32mem:$mem, GR32:$val), 0>;
def : InstAlias<"test{q}\t{$mem, $val|$val, $mem}",
                (TEST64mr i64mem:$mem, GR64:$val), 0>;
// xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
// XCHG swaps its operands, so both written orders denote the same operation.
def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}",
                (XCHG8rm  GR8 :$val, i8mem :$mem), 0>;
def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}",
                (XCHG16rm GR16:$val, i16mem:$mem), 0>;
def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}",
                (XCHG32rm GR32:$val, i32mem:$mem), 0>;
def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}",
                (XCHG64rm GR64:$val, i64mem:$mem), 0>;
// xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms,
// selecting the short accumulator-form encodings (XCHG*ar).
def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src), 0>;
def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar GR32:$src), 0>;
def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src), 0>;
// In 64-bit mode, xchg %eax, %eax can't be encoded with the 0x90 opcode we
// would get by default because it's defined as NOP. But xchg %eax, %eax implies
// implicit zeroing of the upper 32 bits. So alias to the longer encoding.
def : InstAlias<"xchg{l}\t{%eax, %eax|eax, eax}",
                (XCHG32rr EAX, EAX), 0>, Requires<[In64BitMode]>;

// xchg %rax, %rax is a nop in x86-64 and can be encoded as such. Without this
// we emit an unneeded REX.w prefix.
def : InstAlias<"xchg{q}\t{%rax, %rax|rax, rax}", (NOOP), 0>;
// These aliases exist to get the parser to prioritize matching 8-bit
// immediate encodings over matching the implicit ax/eax/rax encodings. By
// explicitly mentioning the A register here, these entries will be ordered
// first due to the more explicit immediate type.

// 16-bit: imm8 (sign-extended) forms against AX.
def : InstAlias<"adc{w}\t{$imm, %ax|ax, $imm}", (ADC16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"add{w}\t{$imm, %ax|ax, $imm}", (ADD16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"and{w}\t{$imm, %ax|ax, $imm}", (AND16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"cmp{w}\t{$imm, %ax|ax, $imm}", (CMP16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"or{w}\t{$imm, %ax|ax, $imm}",  (OR16ri8 AX,  i16i8imm:$imm), 0>;
def : InstAlias<"sbb{w}\t{$imm, %ax|ax, $imm}", (SBB16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"sub{w}\t{$imm, %ax|ax, $imm}", (SUB16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"xor{w}\t{$imm, %ax|ax, $imm}", (XOR16ri8 AX, i16i8imm:$imm), 0>;

// 32-bit: imm8 (sign-extended) forms against EAX.
def : InstAlias<"adc{l}\t{$imm, %eax|eax, $imm}", (ADC32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"add{l}\t{$imm, %eax|eax, $imm}", (ADD32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"and{l}\t{$imm, %eax|eax, $imm}", (AND32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"cmp{l}\t{$imm, %eax|eax, $imm}", (CMP32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"or{l}\t{$imm, %eax|eax, $imm}",  (OR32ri8 EAX,  i32i8imm:$imm), 0>;
def : InstAlias<"sbb{l}\t{$imm, %eax|eax, $imm}", (SBB32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"sub{l}\t{$imm, %eax|eax, $imm}", (SUB32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"xor{l}\t{$imm, %eax|eax, $imm}", (XOR32ri8 EAX, i32i8imm:$imm), 0>;

// 64-bit: imm8 (sign-extended) forms against RAX.
def : InstAlias<"adc{q}\t{$imm, %rax|rax, $imm}", (ADC64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"add{q}\t{$imm, %rax|rax, $imm}", (ADD64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"and{q}\t{$imm, %rax|rax, $imm}", (AND64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"cmp{q}\t{$imm, %rax|rax, $imm}", (CMP64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"or{q}\t{$imm, %rax|rax, $imm}",  (OR64ri8 RAX,  i64i8imm:$imm), 0>;
def : InstAlias<"sbb{q}\t{$imm, %rax|rax, $imm}", (SBB64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"sub{q}\t{$imm, %rax|rax, $imm}", (SUB64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"xor{q}\t{$imm, %rax|rax, $imm}", (XOR64ri8 RAX, i64i8imm:$imm), 0>;