1 //===-- X86InstrMisc.td - Misc X86 Instruction Definition -*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file defining the misc X86 instructions.
11 //===----------------------------------------------------------------------===//
13 //===----------------------------------------------------------------------===//
18 let hasSideEffects = 0, SchedRW = [WriteNop] in {
19 def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
20 def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero),
21 "nop{w}\t$zero", []>, TB, OpSize16;
22 def NOOPL : I<0x1f, MRMXm, (outs), (ins i32mem:$zero),
23 "nop{l}\t$zero", []>, TB, OpSize32;
24 def NOOPQ : RI<0x1f, MRMXm, (outs), (ins i64mem:$zero),
25 "nop{q}\t$zero", []>, TB, Requires<[In64BitMode]>;
26 // Also allow register so we can assemble/disassemble
27 def NOOPWr : I<0x1f, MRMXr, (outs), (ins GR16:$zero),
28 "nop{w}\t$zero", []>, TB, OpSize16;
29 def NOOPLr : I<0x1f, MRMXr, (outs), (ins GR32:$zero),
30 "nop{l}\t$zero", []>, TB, OpSize32;
31 def NOOPQr : RI<0x1f, MRMXr, (outs), (ins GR64:$zero),
32 "nop{q}\t$zero", []>, TB, Requires<[In64BitMode]>;
36 // Constructing a stack frame.
37 def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
38 "enter\t$len, $lvl", []>, Sched<[WriteMicrocoded]>;
40 let SchedRW = [WriteALU] in {
41 let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, hasSideEffects=0 in
42 def LEAVE : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
43 Requires<[Not64BitMode]>;
45 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, hasSideEffects = 0 in
46 def LEAVE64 : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
47 Requires<[In64BitMode]>;
50 //===----------------------------------------------------------------------===//
51 // Miscellaneous Instructions.
54 let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1,
55 SchedRW = [WriteSystem] in
56 def Int_eh_sjlj_setup_dispatch
57 : PseudoI<(outs), (ins), [(X86eh_sjlj_setup_dispatch)]>;
59 let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
60 let mayLoad = 1, SchedRW = [WriteLoad] in {
61 def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
63 def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
64 OpSize32, Requires<[Not64BitMode]>;
65 // Long form for the disassembler.
66 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
67 def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
69 def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
70 OpSize32, Requires<[Not64BitMode]>;
71 } // isCodeGenOnly = 1, ForceDisassemble = 1
73 let mayStore = 1, mayLoad = 1, SchedRW = [WriteCopy] in {
74 def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", []>,
76 def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", []>,
77 OpSize32, Requires<[Not64BitMode]>;
78 } // mayStore, mayLoad, SchedRW
80 let mayStore = 1, SchedRW = [WriteStore] in {
81 def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
83 def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
84 OpSize32, Requires<[Not64BitMode]>;
85 // Long form for the disassembler.
86 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
87 def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
89 def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
90 OpSize32, Requires<[Not64BitMode]>;
91 } // isCodeGenOnly = 1, ForceDisassemble = 1
93 def PUSH16i8 : Ii8<0x6a, RawFrm, (outs), (ins i16i8imm:$imm),
94 "push{w}\t$imm", []>, OpSize16;
95 def PUSH16i : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
96 "push{w}\t$imm", []>, OpSize16;
98 def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
99 "push{l}\t$imm", []>, OpSize32,
100 Requires<[Not64BitMode]>;
101 def PUSH32i : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
102 "push{l}\t$imm", []>, OpSize32,
103 Requires<[Not64BitMode]>;
104 } // mayStore, SchedRW
106 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
107 def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src", []>,
109 def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src", []>,
110 OpSize32, Requires<[Not64BitMode]>;
111 } // mayLoad, mayStore, SchedRW
115 let isPseudo = 1, mayLoad = 1, mayStore = 1,
116 SchedRW = [WriteRMW], Defs = [ESP] in {
118 def RDFLAGS32 : PseudoI<(outs GR32:$dst), (ins),
119 [(set GR32:$dst, (int_x86_flags_read_u32))]>,
120 Requires<[Not64BitMode]>;
123 def RDFLAGS64 : PseudoI<(outs GR64:$dst), (ins),
124 [(set GR64:$dst, (int_x86_flags_read_u64))]>,
125 Requires<[In64BitMode]>;
128 let isPseudo = 1, mayLoad = 1, mayStore = 1,
129 SchedRW = [WriteRMW] in {
130 let Defs = [ESP, EFLAGS, DF], Uses = [ESP] in
131 def WRFLAGS32 : PseudoI<(outs), (ins GR32:$src),
132 [(int_x86_flags_write_u32 GR32:$src)]>,
133 Requires<[Not64BitMode]>;
135 let Defs = [RSP, EFLAGS, DF], Uses = [RSP] in
136 def WRFLAGS64 : PseudoI<(outs), (ins GR64:$src),
137 [(int_x86_flags_write_u64 GR64:$src)]>,
138 Requires<[In64BitMode]>;
141 let Defs = [ESP, EFLAGS, DF], Uses = [ESP], mayLoad = 1, hasSideEffects=0,
142 SchedRW = [WriteLoad] in {
143 def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize16;
144 def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>, OpSize32,
145 Requires<[Not64BitMode]>;
148 let Defs = [ESP], Uses = [ESP, EFLAGS, DF], mayStore = 1, hasSideEffects=0,
149 SchedRW = [WriteStore] in {
150 def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize16;
151 def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>, OpSize32,
152 Requires<[Not64BitMode]>;
155 let Defs = [RSP], Uses = [RSP], hasSideEffects=0 in {
156 let mayLoad = 1, SchedRW = [WriteLoad] in {
157 def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
158 OpSize32, Requires<[In64BitMode]>;
159 // Long form for the disassembler.
160 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
161 def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
162 OpSize32, Requires<[In64BitMode]>;
163 } // isCodeGenOnly = 1, ForceDisassemble = 1
164 def POPP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "popp\t$reg", []>,
165 REX_W, ExplicitREX2Prefix, Requires<[In64BitMode]>;
166 def POP2: I<0x8F, MRM0r, (outs GR64:$reg1, GR64:$reg2), (ins),
167 "pop2\t{$reg2, $reg1|$reg1, $reg2}",
168 []>, EVEX, VVVV, EVEX_B, T_MAP4;
169 def POP2P: I<0x8F, MRM0r, (outs GR64:$reg1, GR64:$reg2), (ins),
170 "pop2p\t{$reg2, $reg1|$reg1, $reg2}",
171 []>, EVEX, VVVV, EVEX_B, T_MAP4, REX_W;
173 } // mayLoad, SchedRW
174 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in
175 def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", []>,
176 OpSize32, Requires<[In64BitMode]>;
177 let mayStore = 1, SchedRW = [WriteStore] in {
178 def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
179 OpSize32, Requires<[In64BitMode]>;
180 // Long form for the disassembler.
181 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
182 def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
183 OpSize32, Requires<[In64BitMode]>;
184 } // isCodeGenOnly = 1, ForceDisassemble = 1
185 def PUSHP64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "pushp\t$reg", []>,
186 REX_W, ExplicitREX2Prefix, Requires<[In64BitMode]>;
187 def PUSH2: I<0xFF, MRM6r, (outs), (ins GR64:$reg1, GR64:$reg2),
188 "push2\t{$reg2, $reg1|$reg1, $reg2}",
189 []>, EVEX, VVVV, EVEX_B, T_MAP4;
190 def PUSH2P: I<0xFF, MRM6r, (outs), (ins GR64:$reg1, GR64:$reg2),
191 "push2p\t{$reg2, $reg1|$reg1, $reg2}",
192 []>, EVEX, VVVV, EVEX_B, T_MAP4, REX_W;
193 } // mayStore, SchedRW
194 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
195 def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>,
196 OpSize32, Requires<[In64BitMode]>;
197 } // mayLoad, mayStore, SchedRW
200 let Defs = [RSP], Uses = [RSP], hasSideEffects = 0, mayStore = 1,
201 SchedRW = [WriteStore] in {
202 def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm),
203 "push{q}\t$imm", []>, OpSize32,
204 Requires<[In64BitMode]>;
205 def PUSH64i32 : Ii32S<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
206 "push{q}\t$imm", []>, OpSize32,
207 Requires<[In64BitMode]>;
210 let Defs = [RSP, EFLAGS, DF], Uses = [RSP], mayLoad = 1, hasSideEffects=0 in
211 def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
212 OpSize32, Requires<[In64BitMode]>, Sched<[WriteLoad]>;
213 let Defs = [RSP], Uses = [RSP, EFLAGS, DF], mayStore = 1, hasSideEffects=0 in
214 def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
215 OpSize32, Requires<[In64BitMode]>, Sched<[WriteStore]>;
217 let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
218 mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteLoad] in {
219 def POPA32 : I<0x61, RawFrm, (outs), (ins), "popal", []>,
220 OpSize32, Requires<[Not64BitMode]>;
221 def POPA16 : I<0x61, RawFrm, (outs), (ins), "popaw", []>,
222 OpSize16, Requires<[Not64BitMode]>;
224 let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
225 mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
226 def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pushal", []>,
227 OpSize32, Requires<[Not64BitMode]>;
228 def PUSHA16 : I<0x60, RawFrm, (outs), (ins), "pushaw", []>,
229 OpSize16, Requires<[Not64BitMode]>;
232 let Constraints = "$src = $dst", SchedRW = [WriteBSWAP32], Predicates = [NoNDD_Or_NoMOVBE] in {
233 // This instruction is a consequence of BSWAP32r observing operand size. The
234 // encoding is valid, but the behavior is undefined.
235 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
236 def BSWAP16r_BAD : I<0xC8, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
237 "bswap{w}\t$dst", []>, OpSize16, TB;
239 def BSWAP32r : I<0xC8, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
241 [(set GR32:$dst, (bswap GR32:$src))]>, OpSize32, TB;
243 let SchedRW = [WriteBSWAP64] in
244 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
246 [(set GR64:$dst, (bswap GR64:$src))]>, TB;
247 } // Constraints = "$src = $dst", SchedRW
249 // Bit scan instructions.
250 let Defs = [EFLAGS] in {
251 def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
252 "bsf{w}\t{$src, $dst|$dst, $src}",
253 [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>,
254 TB, OpSize16, Sched<[WriteBSF]>;
255 def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
256 "bsf{w}\t{$src, $dst|$dst, $src}",
257 [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>,
258 TB, OpSize16, Sched<[WriteBSFLd]>;
259 def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
260 "bsf{l}\t{$src, $dst|$dst, $src}",
261 [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>,
262 TB, OpSize32, Sched<[WriteBSF]>;
263 def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
264 "bsf{l}\t{$src, $dst|$dst, $src}",
265 [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>,
266 TB, OpSize32, Sched<[WriteBSFLd]>;
267 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
268 "bsf{q}\t{$src, $dst|$dst, $src}",
269 [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>,
270 TB, Sched<[WriteBSF]>;
271 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
272 "bsf{q}\t{$src, $dst|$dst, $src}",
273 [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>,
274 TB, Sched<[WriteBSFLd]>;
276 def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
277 "bsr{w}\t{$src, $dst|$dst, $src}",
278 [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>,
279 TB, OpSize16, Sched<[WriteBSR]>;
280 def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
281 "bsr{w}\t{$src, $dst|$dst, $src}",
282 [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>,
283 TB, OpSize16, Sched<[WriteBSRLd]>;
284 def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
285 "bsr{l}\t{$src, $dst|$dst, $src}",
286 [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>,
287 TB, OpSize32, Sched<[WriteBSR]>;
288 def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
289 "bsr{l}\t{$src, $dst|$dst, $src}",
290 [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>,
291 TB, OpSize32, Sched<[WriteBSRLd]>;
292 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
293 "bsr{q}\t{$src, $dst|$dst, $src}",
294 [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>,
295 TB, Sched<[WriteBSR]>;
296 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
297 "bsr{q}\t{$src, $dst|$dst, $src}",
298 [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>,
299 TB, Sched<[WriteBSRLd]>;
302 let SchedRW = [WriteMicrocoded] in {
303 let Defs = [EDI,ESI], Uses = [EDI,ESI,DF] in {
304 def MOVSB : I<0xA4, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
305 "movsb\t{$src, $dst|$dst, $src}", []>;
306 def MOVSW : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
307 "movsw\t{$src, $dst|$dst, $src}", []>, OpSize16;
308 def MOVSL : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
309 "movs{l|d}\t{$src, $dst|$dst, $src}", []>, OpSize32;
310 def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
311 "movsq\t{$src, $dst|$dst, $src}", []>,
312 Requires<[In64BitMode]>;
315 let Defs = [EDI], Uses = [AL,EDI,DF] in
316 def STOSB : I<0xAA, RawFrmDst, (outs), (ins dstidx8:$dst),
317 "stosb\t{%al, $dst|$dst, al}", []>;
318 let Defs = [EDI], Uses = [AX,EDI,DF] in
319 def STOSW : I<0xAB, RawFrmDst, (outs), (ins dstidx16:$dst),
320 "stosw\t{%ax, $dst|$dst, ax}", []>, OpSize16;
321 let Defs = [EDI], Uses = [EAX,EDI,DF] in
322 def STOSL : I<0xAB, RawFrmDst, (outs), (ins dstidx32:$dst),
323 "stos{l|d}\t{%eax, $dst|$dst, eax}", []>, OpSize32;
324 let Defs = [RDI], Uses = [RAX,RDI,DF] in
325 def STOSQ : RI<0xAB, RawFrmDst, (outs), (ins dstidx64:$dst),
326 "stosq\t{%rax, $dst|$dst, rax}", []>,
327 Requires<[In64BitMode]>;
329 let Defs = [EDI,EFLAGS], Uses = [AL,EDI,DF] in
330 def SCASB : I<0xAE, RawFrmDst, (outs), (ins dstidx8:$dst),
331 "scasb\t{$dst, %al|al, $dst}", []>;
332 let Defs = [EDI,EFLAGS], Uses = [AX,EDI,DF] in
333 def SCASW : I<0xAF, RawFrmDst, (outs), (ins dstidx16:$dst),
334 "scasw\t{$dst, %ax|ax, $dst}", []>, OpSize16;
335 let Defs = [EDI,EFLAGS], Uses = [EAX,EDI,DF] in
336 def SCASL : I<0xAF, RawFrmDst, (outs), (ins dstidx32:$dst),
337 "scas{l|d}\t{$dst, %eax|eax, $dst}", []>, OpSize32;
338 let Defs = [EDI,EFLAGS], Uses = [RAX,EDI,DF] in
339 def SCASQ : RI<0xAF, RawFrmDst, (outs), (ins dstidx64:$dst),
340 "scasq\t{$dst, %rax|rax, $dst}", []>,
341 Requires<[In64BitMode]>;
343 let Defs = [EDI,ESI,EFLAGS], Uses = [EDI,ESI,DF] in {
344 def CMPSB : I<0xA6, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
345 "cmpsb\t{$dst, $src|$src, $dst}", []>;
346 def CMPSW : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
347 "cmpsw\t{$dst, $src|$src, $dst}", []>, OpSize16;
348 def CMPSL : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
349 "cmps{l|d}\t{$dst, $src|$src, $dst}", []>, OpSize32;
350 def CMPSQ : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
351 "cmpsq\t{$dst, $src|$src, $dst}", []>,
352 Requires<[In64BitMode]>;
356 //===----------------------------------------------------------------------===//
357 // Move Instructions.
359 let SchedRW = [WriteMove] in {
360 let hasSideEffects = 0, isMoveReg = 1 in {
361 def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
362 "mov{b}\t{$src, $dst|$dst, $src}", []>;
363 def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
364 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16;
365 def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
366 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32;
367 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
368 "mov{q}\t{$src, $dst|$dst, $src}", []>;
371 let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
372 def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
373 "mov{b}\t{$src, $dst|$dst, $src}",
374 [(set GR8:$dst, imm:$src)]>;
375 def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
376 "mov{w}\t{$src, $dst|$dst, $src}",
377 [(set GR16:$dst, imm:$src)]>, OpSize16;
378 def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
379 "mov{l}\t{$src, $dst|$dst, $src}",
380 [(set GR32:$dst, imm:$src)]>, OpSize32;
381 def MOV64ri32 : RIi32S<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
382 "mov{q}\t{$src, $dst|$dst, $src}",
383 [(set GR64:$dst, i64immSExt32:$src)]>;
385 let isReMaterializable = 1, isMoveImm = 1 in {
386 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
387 "movabs{q}\t{$src, $dst|$dst, $src}",
388 [(set GR64:$dst, imm:$src)]>;
391 // Longer forms that use a ModR/M byte. Needed for disassembler
392 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
393 def MOV8ri_alt : Ii8 <0xC6, MRM0r, (outs GR8 :$dst), (ins i8imm :$src),
394 "mov{b}\t{$src, $dst|$dst, $src}", []>;
395 def MOV16ri_alt : Ii16<0xC7, MRM0r, (outs GR16:$dst), (ins i16imm:$src),
396 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16;
397 def MOV32ri_alt : Ii32<0xC7, MRM0r, (outs GR32:$dst), (ins i32imm:$src),
398 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32;
402 let SchedRW = [WriteStore] in {
403 def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
404 "mov{b}\t{$src, $dst|$dst, $src}",
405 [(store (i8 imm_su:$src), addr:$dst)]>;
406 def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
407 "mov{w}\t{$src, $dst|$dst, $src}",
408 [(store (i16 imm_su:$src), addr:$dst)]>, OpSize16;
409 def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
410 "mov{l}\t{$src, $dst|$dst, $src}",
411 [(store (i32 imm_su:$src), addr:$dst)]>, OpSize32;
412 def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
413 "mov{q}\t{$src, $dst|$dst, $src}",
414 [(store i64immSExt32_su:$src, addr:$dst)]>,
415 Requires<[In64BitMode]>;
418 def : Pat<(i32 relocImm:$src), (MOV32ri relocImm:$src)>;
419 def : Pat<(i64 relocImm:$src), (MOV64ri relocImm:$src)>;
421 def : Pat<(store (i8 relocImm8_su:$src), addr:$dst),
422 (MOV8mi addr:$dst, relocImm8_su:$src)>;
423 def : Pat<(store (i16 relocImm16_su:$src), addr:$dst),
424 (MOV16mi addr:$dst, relocImm16_su:$src)>;
425 def : Pat<(store (i32 relocImm32_su:$src), addr:$dst),
426 (MOV32mi addr:$dst, relocImm32_su:$src)>;
427 def : Pat<(store (i64 i64relocImmSExt32_su:$src), addr:$dst),
428 (MOV64mi32 addr:$dst, i64immSExt32_su:$src)>;
430 let hasSideEffects = 0 in {
432 /// Memory offset versions of moves. The immediate is an address mode sized
433 /// offset from the segment base.
434 let SchedRW = [WriteALU] in {
437 def MOV8ao32 : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset32_8:$src),
438 "mov{b}\t{$src, %al|al, $src}", []>,
441 def MOV16ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_16:$src),
442 "mov{w}\t{$src, %ax|ax, $src}", []>,
445 def MOV32ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_32:$src),
446 "mov{l}\t{$src, %eax|eax, $src}", []>,
449 def MOV64ao32 : RIi32<0xA1, RawFrmMemOffs, (outs), (ins offset32_64:$src),
450 "mov{q}\t{$src, %rax|rax, $src}", []>,
454 def MOV8ao16 : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset16_8:$src),
455 "mov{b}\t{$src, %al|al, $src}", []>, AdSize16;
457 def MOV16ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_16:$src),
458 "mov{w}\t{$src, %ax|ax, $src}", []>,
461 def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
462 "mov{l}\t{$src, %eax|eax, $src}", []>,
465 let mayStore = 1 in {
467 def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs), (ins offset32_8:$dst),
468 "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize32;
470 def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_16:$dst),
471 "mov{w}\t{%ax, $dst|$dst, ax}", []>,
474 def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_32:$dst),
475 "mov{l}\t{%eax, $dst|$dst, eax}", []>,
478 def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs), (ins offset32_64:$dst),
479 "mov{q}\t{%rax, $dst|$dst, rax}", []>,
483 def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs), (ins offset16_8:$dst),
484 "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize16;
486 def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_16:$dst),
487 "mov{w}\t{%ax, $dst|$dst, ax}", []>,
490 def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_32:$dst),
491 "mov{l}\t{%eax, $dst|$dst, eax}", []>,
495 // These forms all have full 64-bit absolute addresses in their instructions
496 // and use the movabs mnemonic to indicate this specific form.
499 def MOV8ao64 : Ii64<0xA0, RawFrmMemOffs, (outs), (ins offset64_8:$src),
500 "movabs{b}\t{$src, %al|al, $src}", []>,
503 def MOV16ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_16:$src),
504 "movabs{w}\t{$src, %ax|ax, $src}", []>,
507 def MOV32ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_32:$src),
508 "movabs{l}\t{$src, %eax|eax, $src}", []>,
511 def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
512 "movabs{q}\t{$src, %rax|rax, $src}", []>,
516 let mayStore = 1 in {
518 def MOV8o64a : Ii64<0xA2, RawFrmMemOffs, (outs), (ins offset64_8:$dst),
519 "movabs{b}\t{%al, $dst|$dst, al}", []>,
522 def MOV16o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_16:$dst),
523 "movabs{w}\t{%ax, $dst|$dst, ax}", []>,
526 def MOV32o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_32:$dst),
527 "movabs{l}\t{%eax, $dst|$dst, eax}", []>,
530 def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs), (ins offset64_64:$dst),
531 "movabs{q}\t{%rax, $dst|$dst, rax}", []>,
535 } // hasSideEffects = 0
537 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
538 SchedRW = [WriteMove], isMoveReg = 1 in {
539 def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
540 "mov{b}\t{$src, $dst|$dst, $src}", []>;
541 def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
542 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16;
543 def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
544 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32;
545 def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
546 "mov{q}\t{$src, $dst|$dst, $src}", []>;
549 let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
550 def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
551 "mov{b}\t{$src, $dst|$dst, $src}",
552 [(set GR8:$dst, (loadi8 addr:$src))]>;
553 def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
554 "mov{w}\t{$src, $dst|$dst, $src}",
555 [(set GR16:$dst, (loadi16 addr:$src))]>, OpSize16;
556 def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
557 "mov{l}\t{$src, $dst|$dst, $src}",
558 [(set GR32:$dst, (loadi32 addr:$src))]>, OpSize32;
559 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
560 "mov{q}\t{$src, $dst|$dst, $src}",
561 [(set GR64:$dst, (load addr:$src))]>;
564 let SchedRW = [WriteStore] in {
565 def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
566 "mov{b}\t{$src, $dst|$dst, $src}",
567 [(store GR8:$src, addr:$dst)]>;
568 def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
569 "mov{w}\t{$src, $dst|$dst, $src}",
570 [(store GR16:$src, addr:$dst)]>, OpSize16;
571 def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
572 "mov{l}\t{$src, $dst|$dst, $src}",
573 [(store GR32:$src, addr:$dst)]>, OpSize32;
574 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
575 "mov{q}\t{$src, $dst|$dst, $src}",
576 [(store GR64:$src, addr:$dst)]>;
579 // Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
580 // that they can be used for copying and storing h registers, which can't be
581 // encoded when a REX prefix is present.
582 let isCodeGenOnly = 1 in {
583 let hasSideEffects = 0, isMoveReg = 1 in
584 def MOV8rr_NOREX : I<0x88, MRMDestReg,
585 (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
586 "mov{b}\t{$src, $dst|$dst, $src}", []>,
588 let mayStore = 1, hasSideEffects = 0 in
589 def MOV8mr_NOREX : I<0x88, MRMDestMem,
590 (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
591 "mov{b}\t{$src, $dst|$dst, $src}", []>,
593 let mayLoad = 1, hasSideEffects = 0,
594 canFoldAsLoad = 1, isReMaterializable = 1 in
595 def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
596 (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
597 "mov{b}\t{$src, $dst|$dst, $src}", []>,
602 // Condition code ops, incl. set if equal/not equal/...
603 let SchedRW = [WriteLAHFSAHF] in {
604 let Defs = [EFLAGS], Uses = [AH], hasSideEffects = 0 in
605 def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>, // flags = AH
606 Requires<[HasLAHFSAHF]>;
607 let Defs = [AH], Uses = [EFLAGS], hasSideEffects = 0 in
608 def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>, // AH = flags
609 Requires<[HasLAHFSAHF]>;
612 //===----------------------------------------------------------------------===//
613 // Bit tests instructions: BT, BTS, BTR, BTC.
615 let Defs = [EFLAGS] in {
616 let SchedRW = [WriteBitTest] in {
617 def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
618 "bt{w}\t{$src2, $src1|$src1, $src2}",
619 [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>,
621 def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
622 "bt{l}\t{$src2, $src1|$src1, $src2}",
623 [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>,
625 def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
626 "bt{q}\t{$src2, $src1|$src1, $src2}",
627 [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;
630 // Unlike with the register+register form, the memory+register form of the
631 // bt instruction does not ignore the high bits of the index. From ISel's
632 // perspective, this is pretty bizarre. Make these instructions disassembly
633 // only for now. These instructions are also slow on modern CPUs so that's
634 // another reason to avoid generating them.
636 let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteBitTestRegLd] in {
637 def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
638 "bt{w}\t{$src2, $src1|$src1, $src2}",
640 def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
641 "bt{l}\t{$src2, $src1|$src1, $src2}",
643 def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
644 "bt{q}\t{$src2, $src1|$src1, $src2}",
648 let SchedRW = [WriteBitTest] in {
649 def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16u8imm:$src2),
650 "bt{w}\t{$src2, $src1|$src1, $src2}",
651 [(set EFLAGS, (X86bt GR16:$src1, imm:$src2))]>,
653 def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32u8imm:$src2),
654 "bt{l}\t{$src2, $src1|$src1, $src2}",
655 [(set EFLAGS, (X86bt GR32:$src1, imm:$src2))]>,
657 def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64u8imm:$src2),
658 "bt{q}\t{$src2, $src1|$src1, $src2}",
659 [(set EFLAGS, (X86bt GR64:$src1, imm:$src2))]>, TB;
662 // Note that these instructions aren't slow because that only applies when the
663 // other operand is in a register. When it's an immediate, bt is still fast.
664 let SchedRW = [WriteBitTestImmLd] in {
665 def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
666 "bt{w}\t{$src2, $src1|$src1, $src2}",
667 [(set EFLAGS, (X86bt (loadi16 addr:$src1),
670 def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
671 "bt{l}\t{$src2, $src1|$src1, $src2}",
672 [(set EFLAGS, (X86bt (loadi32 addr:$src1),
675 def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
676 "bt{q}\t{$src2, $src1|$src1, $src2}",
677 [(set EFLAGS, (X86bt (loadi64 addr:$src1),
679 Requires<[In64BitMode]>;
682 let hasSideEffects = 0 in {
683 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
684 def BTC16rr : I<0xBB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
685 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
687 def BTC32rr : I<0xBB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
688 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
690 def BTC64rr : RI<0xBB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
691 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
694 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
695 def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
696 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
698 def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
699 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
701 def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
702 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
705 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
706 def BTC16ri8 : Ii8<0xBA, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
707 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
708 def BTC32ri8 : Ii8<0xBA, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
709 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
710 def BTC64ri8 : RIi8<0xBA, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
711 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
714 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
715 def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
716 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
717 def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
718 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
719 def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
720 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
721 Requires<[In64BitMode]>;
724 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
725 def BTR16rr : I<0xB3, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
726 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
728 def BTR32rr : I<0xB3, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
729 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
731 def BTR64rr : RI<0xB3, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
732 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
735 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
736 def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
737 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
739 def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
740 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
742 def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
743 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
746 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
747 def BTR16ri8 : Ii8<0xBA, MRM6r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
748 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
750 def BTR32ri8 : Ii8<0xBA, MRM6r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
751 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
753 def BTR64ri8 : RIi8<0xBA, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
754 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
757 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
758 def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
759 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
761 def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
762 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
764 def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
765 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
766 Requires<[In64BitMode]>;
769 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
770 def BTS16rr : I<0xAB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
771 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
773 def BTS32rr : I<0xAB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
774 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
776 def BTS64rr : RI<0xAB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
777 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
780 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
781 def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
782 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
784 def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
785 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
787 def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
788 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
791 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
792 def BTS16ri8 : Ii8<0xBA, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
793 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
794 def BTS32ri8 : Ii8<0xBA, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
795 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
796 def BTS64ri8 : RIi8<0xBA, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
797 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
800 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
801 def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
802 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
803 def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
804 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
805 def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
806 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
807 Requires<[In64BitMode]>;
809 } // hasSideEffects = 0
813 //===----------------------------------------------------------------------===//
817 // Atomic swap. These are just normal xchg instructions. But since a memory
818 // operand is referenced, the atomicity is ensured.
819 multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag> {
820 let Constraints = "$val = $dst", SchedRW = [WriteALULd, WriteRMW] in {
821 def NAME#8rm : I<opc8, MRMSrcMem, (outs GR8:$dst),
822 (ins GR8:$val, i8mem:$ptr),
823 !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
826 (!cast<PatFrag>(frag # "_i8") addr:$ptr, GR8:$val))]>;
827 def NAME#16rm : I<opc, MRMSrcMem, (outs GR16:$dst),
828 (ins GR16:$val, i16mem:$ptr),
829 !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
832 (!cast<PatFrag>(frag # "_i16") addr:$ptr, GR16:$val))]>,
834 def NAME#32rm : I<opc, MRMSrcMem, (outs GR32:$dst),
835 (ins GR32:$val, i32mem:$ptr),
836 !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
839 (!cast<PatFrag>(frag # "_i32") addr:$ptr, GR32:$val))]>,
841 def NAME#64rm : RI<opc, MRMSrcMem, (outs GR64:$dst),
842 (ins GR64:$val, i64mem:$ptr),
843 !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
846 (!cast<PatFrag>(frag # "_i64") addr:$ptr, GR64:$val))]>;
850 defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap">;
852 // Swap between registers.
853 let SchedRW = [WriteXCHG] in {
854 let Constraints = "$src1 = $dst1, $src2 = $dst2", hasSideEffects = 0 in {
855 def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst1, GR8:$dst2),
856 (ins GR8:$src1, GR8:$src2),
857 "xchg{b}\t{$src2, $src1|$src1, $src2}", []>;
858 def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst1, GR16:$dst2),
859 (ins GR16:$src1, GR16:$src2),
860 "xchg{w}\t{$src2, $src1|$src1, $src2}", []>,
862 def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst1, GR32:$dst2),
863 (ins GR32:$src1, GR32:$src2),
864 "xchg{l}\t{$src2, $src1|$src1, $src2}", []>,
866 def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst1, GR64:$dst2),
867 (ins GR64:$src1 ,GR64:$src2),
868 "xchg{q}\t{$src2, $src1|$src1, $src2}", []>;
871 // Swap between EAX and other registers.
872 let Constraints = "$src = $dst", hasSideEffects = 0 in {
873 let Uses = [AX], Defs = [AX] in
874 def XCHG16ar : I<0x90, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
875 "xchg{w}\t{$src, %ax|ax, $src}", []>, OpSize16;
876 let Uses = [EAX], Defs = [EAX] in
877 def XCHG32ar : I<0x90, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
878 "xchg{l}\t{$src, %eax|eax, $src}", []>, OpSize32;
879 let Uses = [RAX], Defs = [RAX] in
880 def XCHG64ar : RI<0x90, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
881 "xchg{q}\t{$src, %rax|rax, $src}", []>;
885 let hasSideEffects = 0, Constraints = "$src1 = $dst1, $src2 = $dst2",
886 Defs = [EFLAGS], SchedRW = [WriteXCHG] in {
887 def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst1, GR8:$dst2),
888 (ins GR8:$src1, GR8:$src2),
889 "xadd{b}\t{$src2, $src1|$src1, $src2}", []>, TB;
890 def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst1, GR16:$dst2),
891 (ins GR16:$src1, GR16:$src2),
892 "xadd{w}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize16;
893 def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst1, GR32:$dst2),
894 (ins GR32:$src1, GR32:$src2),
895 "xadd{l}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize32;
896 def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst1, GR64:$dst2),
897 (ins GR64:$src1, GR64:$src2),
898 "xadd{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
901 let mayLoad = 1, mayStore = 1, hasSideEffects = 0, Constraints = "$val = $dst",
902 Defs = [EFLAGS], SchedRW = [WriteALULd, WriteRMW] in {
903 def XADD8rm : I<0xC0, MRMSrcMem, (outs GR8:$dst),
904 (ins GR8:$val, i8mem:$ptr),
905 "xadd{b}\t{$val, $ptr|$ptr, $val}", []>, TB;
906 def XADD16rm : I<0xC1, MRMSrcMem, (outs GR16:$dst),
907 (ins GR16:$val, i16mem:$ptr),
908 "xadd{w}\t{$val, $ptr|$ptr, $val}", []>, TB,
910 def XADD32rm : I<0xC1, MRMSrcMem, (outs GR32:$dst),
911 (ins GR32:$val, i32mem:$ptr),
912 "xadd{l}\t{$val, $ptr|$ptr, $val}", []>, TB,
914 def XADD64rm : RI<0xC1, MRMSrcMem, (outs GR64:$dst),
915 (ins GR64:$val, i64mem:$ptr),
916 "xadd{q}\t{$val, $ptr|$ptr, $val}", []>, TB;
920 let SchedRW = [WriteCMPXCHG], hasSideEffects = 0 in {
921 let Defs = [AL, EFLAGS], Uses = [AL] in
922 def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
923 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB;
924 let Defs = [AX, EFLAGS], Uses = [AX] in
925 def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
926 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16;
927 let Defs = [EAX, EFLAGS], Uses = [EAX] in
928 def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
929 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32;
930 let Defs = [RAX, EFLAGS], Uses = [RAX] in
931 def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
932 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
933 } // SchedRW, hasSideEffects
935 let SchedRW = [WriteCMPXCHGRMW], mayLoad = 1, mayStore = 1,
936 hasSideEffects = 0 in {
937 let Defs = [AL, EFLAGS], Uses = [AL] in
938 def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
939 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB;
940 let Defs = [AX, EFLAGS], Uses = [AX] in
941 def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
942 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16;
943 let Defs = [EAX, EFLAGS], Uses = [EAX] in
944 def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
945 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32;
946 let Defs = [RAX, EFLAGS], Uses = [RAX] in
947 def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
948 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
950 let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
951 def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
952 "cmpxchg8b\t$dst", []>, TB, Requires<[HasCX8]>;
954 let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
955 // NOTE: In64BitMode check needed for the AssemblerPredicate.
956 def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
957 "cmpxchg16b\t$dst", []>,
958 TB, Requires<[HasCX16,In64BitMode]>;
959 } // SchedRW, mayLoad, mayStore, hasSideEffects
962 // Lock instruction prefix
963 let SchedRW = [WriteMicrocoded] in
964 def LOCK_PREFIX : I<0xF0, PrefixByte, (outs), (ins), "lock", []>;
966 let SchedRW = [WriteNop] in {
968 // Rex64 instruction prefix
969 def REX64_PREFIX : I<0x48, PrefixByte, (outs), (ins), "rex64", []>,
970 Requires<[In64BitMode]>;
972 // Data16 instruction prefix
973 def DATA16_PREFIX : I<0x66, PrefixByte, (outs), (ins), "data16", []>;
976 // Repeat string operation instruction prefixes
977 let Defs = [ECX], Uses = [ECX,DF], SchedRW = [WriteMicrocoded] in {
978 // Repeat (used with INS, OUTS, MOVS, LODS and STOS)
979 def REP_PREFIX : I<0xF3, PrefixByte, (outs), (ins), "rep", []>;
980 // Repeat while not equal (used with CMPS and SCAS)
981 def REPNE_PREFIX : I<0xF2, PrefixByte, (outs), (ins), "repne", []>;
984 // String manipulation instructions
985 let SchedRW = [WriteMicrocoded] in {
986 let Defs = [AL,ESI], Uses = [ESI,DF] in
987 def LODSB : I<0xAC, RawFrmSrc, (outs), (ins srcidx8:$src),
988 "lodsb\t{$src, %al|al, $src}", []>;
989 let Defs = [AX,ESI], Uses = [ESI,DF] in
990 def LODSW : I<0xAD, RawFrmSrc, (outs), (ins srcidx16:$src),
991 "lodsw\t{$src, %ax|ax, $src}", []>, OpSize16;
992 let Defs = [EAX,ESI], Uses = [ESI,DF] in
993 def LODSL : I<0xAD, RawFrmSrc, (outs), (ins srcidx32:$src),
994 "lods{l|d}\t{$src, %eax|eax, $src}", []>, OpSize32;
995 let Defs = [RAX,ESI], Uses = [ESI,DF] in
996 def LODSQ : RI<0xAD, RawFrmSrc, (outs), (ins srcidx64:$src),
997 "lodsq\t{$src, %rax|rax, $src}", []>,
998 Requires<[In64BitMode]>;
1001 let SchedRW = [WriteSystem] in {
1002 let Defs = [ESI], Uses = [DX,ESI,DF] in {
1003 def OUTSB : I<0x6E, RawFrmSrc, (outs), (ins srcidx8:$src),
1004 "outsb\t{$src, %dx|dx, $src}", []>;
1005 def OUTSW : I<0x6F, RawFrmSrc, (outs), (ins srcidx16:$src),
1006 "outsw\t{$src, %dx|dx, $src}", []>, OpSize16;
1007 def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src),
1008 "outs{l|d}\t{$src, %dx|dx, $src}", []>, OpSize32;
1011 let Defs = [EDI], Uses = [DX,EDI,DF] in {
1012 def INSB : I<0x6C, RawFrmDst, (outs), (ins dstidx8:$dst),
1013 "insb\t{%dx, $dst|$dst, dx}", []>;
1014 def INSW : I<0x6D, RawFrmDst, (outs), (ins dstidx16:$dst),
1015 "insw\t{%dx, $dst|$dst, dx}", []>, OpSize16;
1016 def INSL : I<0x6D, RawFrmDst, (outs), (ins dstidx32:$dst),
1017 "ins{l|d}\t{%dx, $dst|$dst, dx}", []>, OpSize32;
1021 // EFLAGS management instructions.
1022 let SchedRW = [WriteALU], Defs = [EFLAGS], Uses = [EFLAGS] in {
1023 def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>;
1024 def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>;
1025 def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>;
1028 // DF management instructions.
1029 let SchedRW = [WriteALU], Defs = [DF] in {
1030 def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", []>;
1031 def STD : I<0xFD, RawFrm, (outs), (ins), "std", []>;
1034 // Table lookup instructions
1035 let Uses = [AL,EBX], Defs = [AL], hasSideEffects = 0, mayLoad = 1 in
1036 def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>, Sched<[WriteLoad]>;
1038 let SchedRW = [WriteMicrocoded] in {
1039 // ASCII Adjust After Addition
1040 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
1041 def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", []>,
1042 Requires<[Not64BitMode]>;
1044 // ASCII Adjust AX Before Division
1045 let Uses = [AX], Defs = [AX,EFLAGS], hasSideEffects = 0 in
1046 def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
1047 "aad\t$src", []>, Requires<[Not64BitMode]>;
1049 // ASCII Adjust AX After Multiply
1050 let Uses = [AL], Defs = [AX,EFLAGS], hasSideEffects = 0 in
1051 def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
1052 "aam\t$src", []>, Requires<[Not64BitMode]>;
1054 // ASCII Adjust AL After Subtraction - sets
1055 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
1056 def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", []>,
1057 Requires<[Not64BitMode]>;
1059 // Decimal Adjust AL after Addition
1060 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
1061 def DAA : I<0x27, RawFrm, (outs), (ins), "daa", []>,
1062 Requires<[Not64BitMode]>;
1064 // Decimal Adjust AL after Subtraction
1065 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
1066 def DAS : I<0x2F, RawFrm, (outs), (ins), "das", []>,
1067 Requires<[Not64BitMode]>;
1070 let SchedRW = [WriteSystem] in {
1071 // Check Array Index Against Bounds
1072 // Note: "bound" does not have reversed operands in at&t syntax.
1073 def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1074 "bound\t$dst, $src", []>, OpSize16,
1075 Requires<[Not64BitMode]>;
1076 def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1077 "bound\t$dst, $src", []>, OpSize32,
1078 Requires<[Not64BitMode]>;
1080 // Adjust RPL Field of Segment Selector
1081 def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
1082 "arpl\t{$src, $dst|$dst, $src}", []>,
1083 Requires<[Not64BitMode]>;
1085 def ARPL16mr : I<0x63, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1086 "arpl\t{$src, $dst|$dst, $src}", []>,
1087 Requires<[Not64BitMode]>;
1090 //===----------------------------------------------------------------------===//
1091 // MOVBE Instructions
1093 multiclass Movbe<bits<8> o, X86TypeInfo t, string suffix = ""> {
1094 def rm#suffix : ITy<o, MRMSrcMem, t, (outs t.RegClass:$dst),
1095 (ins t.MemOperand:$src1), "movbe", unaryop_ndd_args,
1096 [(set t.RegClass:$dst, (bswap (t.LoadNode addr:$src1)))]>,
1097 Sched<[WriteALULd]>;
1098 def mr#suffix : ITy<!add(o, 1), MRMDestMem, t, (outs),
1099 (ins t.MemOperand:$dst, t.RegClass:$src1),
1100 "movbe", unaryop_ndd_args,
1101 [(store (bswap t.RegClass:$src1), addr:$dst)]>,
1102 Sched<[WriteStore]>;
1105 let Predicates = [HasMOVBE, NoEGPR] in {
1106 defm MOVBE16 : Movbe<0xF0, Xi16>, OpSize16, T8;
1107 defm MOVBE32 : Movbe<0xF0, Xi32>, OpSize32, T8;
1108 defm MOVBE64 : Movbe<0xF0, Xi64>, T8;
1111 let Predicates = [HasMOVBE, HasEGPR, In64BitMode] in {
1112 defm MOVBE16 : Movbe<0x60, Xi16, "_EVEX">, EVEX, T_MAP4, PD;
1113 defm MOVBE32 : Movbe<0x60, Xi32, "_EVEX">, EVEX, T_MAP4;
1114 defm MOVBE64 : Movbe<0x60, Xi64, "_EVEX">, EVEX, T_MAP4;
1117 multiclass Movberr<X86TypeInfo t> {
1118 def rr : ITy<0x61, MRMDestReg, t, (outs t.RegClass:$dst),
1119 (ins t.RegClass:$src1), "movbe", unaryop_ndd_args,
1120 [(set t.RegClass:$dst, (bswap t.RegClass:$src1))]>,
1122 def rr_REV : ITy<0x60, MRMSrcReg, t, (outs t.RegClass:$dst),
1123 (ins t.RegClass:$src1), "movbe", unaryop_ndd_args, []>,
1124 EVEX, T_MAP4, DisassembleOnly;
1126 let SchedRW = [WriteALU], Predicates = [HasMOVBE, HasNDD, In64BitMode] in {
1127 defm MOVBE16 : Movberr<Xi16>, PD;
1128 defm MOVBE32 : Movberr<Xi32>;
1129 defm MOVBE64 : Movberr<Xi64>;
1132 //===----------------------------------------------------------------------===//
1133 // RDRAND Instruction
1135 let Predicates = [HasRDRAND], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
1136 def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
1137 "rdrand{w}\t$dst", [(set GR16:$dst, EFLAGS, (X86rdrand))]>,
1139 def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
1140 "rdrand{l}\t$dst", [(set GR32:$dst, EFLAGS, (X86rdrand))]>,
1142 def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
1143 "rdrand{q}\t$dst", [(set GR64:$dst, EFLAGS, (X86rdrand))]>,
1147 //===----------------------------------------------------------------------===//
1148 // RDSEED Instruction
1150 let Predicates = [HasRDSEED], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
1151 def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins), "rdseed{w}\t$dst",
1152 [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, TB;
1153 def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins), "rdseed{l}\t$dst",
1154 [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, TB;
1155 def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdseed{q}\t$dst",
1156 [(set GR64:$dst, EFLAGS, (X86rdseed))]>, TB;
1159 //===----------------------------------------------------------------------===//
1160 // LZCNT Instruction
1162 multiclass Lzcnt<bits<8> o, string m, SDPatternOperator node, X86TypeInfo t,
1163 SchedWrite schedrr, SchedWrite schedrm, string suffix = ""> {
1164 def rr#suffix : ITy<o, MRMSrcReg, t, (outs t.RegClass:$dst),
1165 (ins t.RegClass:$src1), m, unaryop_ndd_args,
1166 [(set t.RegClass:$dst, (node t.RegClass:$src1))]>,
1167 TB, Sched<[schedrr]>;
1169 def rm#suffix : ITy<o, MRMSrcMem, t, (outs t.RegClass:$dst),
1170 (ins t.MemOperand:$src1), m, unaryop_ndd_args,
1171 [(set t.RegClass:$dst, (node (t.LoadNode addr:$src1)))]>,
1172 TB, Sched<[schedrm]>;
1175 let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
1176 defm LZCNT16 : Lzcnt<0xBD, "lzcnt", ctlz, Xi16, WriteLZCNT, WriteLZCNTLd>, OpSize16, XS;
1177 defm LZCNT32 : Lzcnt<0xBD, "lzcnt", ctlz, Xi32, WriteLZCNT, WriteLZCNTLd>, OpSize32, XS;
1178 defm LZCNT64 : Lzcnt<0xBD, "lzcnt", ctlz, Xi64, WriteLZCNT, WriteLZCNTLd>, XS;
1180 defm LZCNT16 : Lzcnt<0xF5, "lzcnt", null_frag, Xi16, WriteLZCNT, WriteLZCNTLd, "_EVEX">, PL, PD;
1181 defm LZCNT32 : Lzcnt<0xF5, "lzcnt", null_frag, Xi32, WriteLZCNT, WriteLZCNTLd, "_EVEX">, PL;
1182 defm LZCNT64 : Lzcnt<0xF5, "lzcnt", null_frag, Xi64, WriteLZCNT, WriteLZCNTLd, "_EVEX">, PL;
1185 defm LZCNT16 : Lzcnt<0xF5, "lzcnt", null_frag, Xi16, WriteLZCNT, WriteLZCNTLd, "_NF">, NF, PD;
1186 defm LZCNT32 : Lzcnt<0xF5, "lzcnt", null_frag, Xi32, WriteLZCNT, WriteLZCNTLd, "_NF">, NF;
1187 defm LZCNT64 : Lzcnt<0xF5, "lzcnt", null_frag, Xi64, WriteLZCNT, WriteLZCNTLd, "_NF">, NF;
1189 //===----------------------------------------------------------------------===//
1192 let Predicates = [HasBMI], Defs = [EFLAGS] in {
1193 defm TZCNT16 : Lzcnt<0xBC, "tzcnt", cttz, Xi16, WriteTZCNT, WriteTZCNTLd>, OpSize16, XS;
1194 defm TZCNT32 : Lzcnt<0xBC, "tzcnt", cttz, Xi32, WriteTZCNT, WriteTZCNTLd>, OpSize32, XS;
1195 defm TZCNT64 : Lzcnt<0xBC, "tzcnt", cttz, Xi64, WriteTZCNT, WriteTZCNTLd>, XS;
1197 defm TZCNT16 : Lzcnt<0xF4, "tzcnt", null_frag, Xi16, WriteTZCNT, WriteTZCNTLd, "_EVEX">, PL, PD;
1198 defm TZCNT32 : Lzcnt<0xF4, "tzcnt", null_frag, Xi32, WriteTZCNT, WriteTZCNTLd, "_EVEX">, PL;
1199 defm TZCNT64 : Lzcnt<0xF4, "tzcnt", null_frag, Xi64, WriteTZCNT, WriteTZCNTLd, "_EVEX">, PL;
1202 defm TZCNT16 : Lzcnt<0xF4, "tzcnt", null_frag, Xi16, WriteTZCNT, WriteTZCNTLd, "_NF">, NF, PD;
1203 defm TZCNT32 : Lzcnt<0xF4, "tzcnt", null_frag, Xi32, WriteTZCNT, WriteTZCNTLd, "_NF">, NF;
1204 defm TZCNT64 : Lzcnt<0xF4, "tzcnt", null_frag, Xi64, WriteTZCNT, WriteTZCNTLd, "_NF">, NF;
1206 multiclass Bls<string m, Format RegMRM, Format MemMRM, X86TypeInfo t, string Suffix = ""> {
1207 let SchedRW = [WriteBLS] in {
1208 def rr#Suffix : UnaryOpR<0xF3, RegMRM, m, unaryop_ndd_args, t,
1209 (outs t.RegClass:$dst), []>, T8, VVVV;
1212 let SchedRW = [WriteBLS.Folded] in
1213 def rm#Suffix : UnaryOpM<0xF3, MemMRM, m, unaryop_ndd_args, t,
1214 (outs t.RegClass:$dst), []>, T8, VVVV;
1217 let Defs = [EFLAGS] in {
1218 defm BLSR32 : Bls<"blsr", MRM1r, MRM1m, Xi32>, VEX;
1219 defm BLSR64 : Bls<"blsr", MRM1r, MRM1m, Xi64>, VEX;
1220 defm BLSMSK32 : Bls<"blsmsk", MRM2r, MRM2m, Xi32>, VEX;
1221 defm BLSMSK64 : Bls<"blsmsk", MRM2r, MRM2m, Xi64>, VEX;
1222 defm BLSI32 : Bls<"blsi", MRM3r, MRM3m, Xi32>, VEX;
1223 defm BLSI64 : Bls<"blsi", MRM3r, MRM3m, Xi64>, VEX;
1226 let Predicates = [In64BitMode], Defs = [EFLAGS] in {
1227 defm BLSR32 : Bls<"blsr", MRM1r, MRM1m, Xi32, "_EVEX">, EVEX;
1228 defm BLSR64 : Bls<"blsr", MRM1r, MRM1m, Xi64, "_EVEX">, EVEX;
1229 defm BLSMSK32 : Bls<"blsmsk", MRM2r, MRM2m, Xi32, "_EVEX">, EVEX;
1230 defm BLSMSK64 : Bls<"blsmsk", MRM2r, MRM2m, Xi64, "_EVEX">, EVEX;
1231 defm BLSI32 : Bls<"blsi", MRM3r, MRM3m, Xi32, "_EVEX">, EVEX;
1232 defm BLSI64 : Bls<"blsi", MRM3r, MRM3m, Xi64, "_EVEX">, EVEX;
1235 let Predicates = [In64BitMode] in {
1236 defm BLSR32 : Bls<"blsr", MRM1r, MRM1m, Xi32, "_NF">, EVEX, EVEX_NF;
1237 defm BLSR64 : Bls<"blsr", MRM1r, MRM1m, Xi64, "_NF">, EVEX, EVEX_NF;
1238 defm BLSMSK32 : Bls<"blsmsk", MRM2r, MRM2m, Xi32, "_NF">, EVEX, EVEX_NF;
1239 defm BLSMSK64 : Bls<"blsmsk", MRM2r, MRM2m, Xi64, "_NF">, EVEX, EVEX_NF;
1240 defm BLSI32 : Bls<"blsi", MRM3r, MRM3m, Xi32, "_NF">, EVEX, EVEX_NF;
1241 defm BLSI64 : Bls<"blsi", MRM3r, MRM3m, Xi64, "_NF">, EVEX, EVEX_NF;
1244 multiclass Bls_Pats<string suffix> {
1245 // FIXME(1): patterns for the load versions are not implemented
1246 // FIXME(2): By only matching `add_su` and `ineg_su` we may emit
1247 // extra `mov` instructions if `src` has future uses. It may be better
1248 // to always match if `src` has more users.
1249 def : Pat<(and GR32:$src, (add_su GR32:$src, -1)),
1250 (!cast<Instruction>(BLSR32rr#suffix) GR32:$src)>;
1251 def : Pat<(and GR64:$src, (add_su GR64:$src, -1)),
1252 (!cast<Instruction>(BLSR64rr#suffix) GR64:$src)>;
1254 def : Pat<(xor GR32:$src, (add_su GR32:$src, -1)),
1255 (!cast<Instruction>(BLSMSK32rr#suffix) GR32:$src)>;
1256 def : Pat<(xor GR64:$src, (add_su GR64:$src, -1)),
1257 (!cast<Instruction>(BLSMSK64rr#suffix) GR64:$src)>;
1259 def : Pat<(and GR32:$src, (ineg_su GR32:$src)),
1260 (!cast<Instruction>(BLSI32rr#suffix) GR32:$src)>;
1261 def : Pat<(and GR64:$src, (ineg_su GR64:$src)),
1262 (!cast<Instruction>(BLSI64rr#suffix) GR64:$src)>;
1264 // Versions to match flag producing ops.
1265 def : Pat<(and_flag_nocf GR32:$src, (add_su GR32:$src, -1)),
1266 (!cast<Instruction>(BLSR32rr#suffix) GR32:$src)>;
1267 def : Pat<(and_flag_nocf GR64:$src, (add_su GR64:$src, -1)),
1268 (!cast<Instruction>(BLSR64rr#suffix) GR64:$src)>;
1270 def : Pat<(xor_flag_nocf GR32:$src, (add_su GR32:$src, -1)),
1271 (!cast<Instruction>(BLSMSK32rr#suffix) GR32:$src)>;
1272 def : Pat<(xor_flag_nocf GR64:$src, (add_su GR64:$src, -1)),
1273 (!cast<Instruction>(BLSMSK64rr#suffix) GR64:$src)>;
1275 def : Pat<(and_flag_nocf GR32:$src, (ineg_su GR32:$src)),
1276 (!cast<Instruction>(BLSI32rr#suffix) GR32:$src)>;
1277 def : Pat<(and_flag_nocf GR64:$src, (ineg_su GR64:$src)),
1278 (!cast<Instruction>(BLSI64rr#suffix) GR64:$src)>;
1281 let Predicates = [HasBMI, NoEGPR] in
1282 defm : Bls_Pats<"">;
1284 let Predicates = [HasBMI, HasEGPR] in
1285 defm : Bls_Pats<"_EVEX">;
1287 multiclass Bmi4VOp3<bits<8> o, string m, X86TypeInfo t, SDPatternOperator node,
1288 X86FoldableSchedWrite sched, string Suffix = ""> {
1289 let SchedRW = [sched], Form = MRMSrcReg4VOp3 in
1290 def rr#Suffix : BinOpRR<o, m, binop_ndd_args, t, (outs t.RegClass:$dst),
1291 [(set t.RegClass:$dst, EFLAGS,
1292 (node t.RegClass:$src1, t.RegClass:$src2))]>, T8;
1293 let SchedRW = [sched.Folded,
1294 ReadDefault, ReadDefault, ReadDefault, ReadDefault, ReadDefault,
1295 sched.ReadAfterFold], Form = MRMSrcMem4VOp3 in
1296 def rm#Suffix : BinOpMR<o, m, binop_ndd_args, t, (outs t.RegClass:$dst),
1297 [(set t.RegClass:$dst, EFLAGS, (node (t.LoadNode addr:$src1),
1298 t.RegClass:$src2))]>, T8;
1301 let Predicates = [HasBMI, NoEGPR], Defs = [EFLAGS] in {
1302 defm BEXTR32 : Bmi4VOp3<0xF7, "bextr", Xi32, X86bextr, WriteBEXTR>, VEX;
1303 defm BEXTR64 : Bmi4VOp3<0xF7, "bextr", Xi64, X86bextr, WriteBEXTR>, VEX;
1305 let Predicates = [HasBMI2, NoEGPR], Defs = [EFLAGS] in {
1306 defm BZHI32 : Bmi4VOp3<0xF5, "bzhi", Xi32, X86bzhi, WriteBZHI>, VEX;
1307 defm BZHI64 : Bmi4VOp3<0xF5, "bzhi", Xi64, X86bzhi, WriteBZHI>, VEX;
1309 let Predicates = [HasBMI, HasEGPR, In64BitMode], Defs = [EFLAGS] in {
1310 defm BEXTR32 : Bmi4VOp3<0xF7, "bextr", Xi32, X86bextr, WriteBEXTR, "_EVEX">, EVEX;
1311 defm BEXTR64 : Bmi4VOp3<0xF7, "bextr", Xi64, X86bextr, WriteBEXTR, "_EVEX">, EVEX;
1313 let Predicates = [HasBMI2, HasEGPR, In64BitMode], Defs = [EFLAGS] in {
1314 defm BZHI32 : Bmi4VOp3<0xF5, "bzhi", Xi32, X86bzhi, WriteBZHI, "_EVEX">, EVEX;
1315 defm BZHI64 : Bmi4VOp3<0xF5, "bzhi", Xi64, X86bzhi, WriteBZHI, "_EVEX">, EVEX;
1318 let Predicates = [In64BitMode] in {
1319 defm BEXTR32 : Bmi4VOp3<0xF7, "bextr", Xi32, null_frag, WriteBEXTR, "_NF">, EVEX, EVEX_NF;
1320 defm BEXTR64 : Bmi4VOp3<0xF7, "bextr", Xi64, null_frag, WriteBEXTR, "_NF">, EVEX, EVEX_NF;
1321 defm BZHI32 : Bmi4VOp3<0xF5, "bzhi", Xi32, null_frag, WriteBZHI, "_NF">, EVEX, EVEX_NF;
1322 defm BZHI64 : Bmi4VOp3<0xF5, "bzhi", Xi64, null_frag, WriteBZHI, "_NF">, EVEX, EVEX_NF;
1325 def CountTrailingOnes : SDNodeXForm<imm, [{
1326 // Count the trailing ones in the immediate.
1327 return getI8Imm(llvm::countr_one(N->getZExtValue()), SDLoc(N));
1330 def BEXTRMaskXForm : SDNodeXForm<imm, [{
1331 unsigned Length = llvm::countr_one(N->getZExtValue());
1332 return getI32Imm(Length << 8, SDLoc(N));
1335 def AndMask64 : ImmLeaf<i64, [{
1336 return isMask_64(Imm) && !isUInt<32>(Imm);

// Use BEXTR for 64-bit 'and' with large immediate 'mask'.
let Predicates = [HasBMI, NoBMI2, NoTBM, NoEGPR] in {
  def : Pat<(and GR64:$src, AndMask64:$mask),
            (BEXTR64rr GR64:$src,
              (SUBREG_TO_REG (i64 0),
                             (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
  def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
            (BEXTR64rm addr:$src,
              (SUBREG_TO_REG (i64 0),
                             (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
}

let Predicates = [HasBMI, NoBMI2, NoTBM, HasEGPR] in {
  def : Pat<(and GR64:$src, AndMask64:$mask),
            (BEXTR64rr_EVEX GR64:$src,
              (SUBREG_TO_REG (i64 0),
                             (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
  def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
            (BEXTR64rm_EVEX addr:$src,
              (SUBREG_TO_REG (i64 0),
                             (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
}

// Use BZHI for 64-bit 'and' with large immediate 'mask'.
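// BZHI zeroes all bits at positions greater than or equal to the index held
// in bits [7:0] of its second operand, so an index equal to the number of
// trailing ones in the mask reproduces the AND.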
let Predicates = [HasBMI2, NoTBM, NoEGPR] in {
  def : Pat<(and GR64:$src, AndMask64:$mask),
            (BZHI64rr GR64:$src,
              (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                             (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
  def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
            (BZHI64rm addr:$src,
              (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                             (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
}

let Predicates = [HasBMI2, NoTBM, HasEGPR] in {
  def : Pat<(and GR64:$src, AndMask64:$mask),
            (BZHI64rr_EVEX GR64:$src,
              (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                             (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
  def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
            (BZHI64rm_EVEX addr:$src,
              (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
                             (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
}
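
// PDEP deposits the low-order bits of the first source into the bit positions
// selected by the mask in the second source; PEXT gathers the bits selected by
// the mask into the low-order bits of the destination. For example, PDEP of
// 0b11 with mask 0b1010 gives 0b1010, and PEXT of 0b1000 with mask 0b1010
// gives 0b10.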
multiclass PdepPext<string m, X86TypeInfo t, SDPatternOperator node,
                    string suffix = ""> {
  def rr#suffix : ITy<0xF5, MRMSrcReg, t, (outs t.RegClass:$dst),
                      (ins t.RegClass:$src1, t.RegClass:$src2), m, binop_ndd_args,
                      [(set t.RegClass:$dst, (node t.RegClass:$src1, t.RegClass:$src2))]>,
                      T8, VVVV, Sched<[WriteALU]>;
  def rm#suffix : ITy<0xF5, MRMSrcMem, t, (outs t.RegClass:$dst),
                      (ins t.RegClass:$src1, t.MemOperand:$src2), m, binop_ndd_args,
                      [(set t.RegClass:$dst, (node t.RegClass:$src1, (t.LoadNode addr:$src2)))]>,
                      T8, VVVV, Sched<[WriteALU.Folded, WriteALU.ReadAfterFold]>;
}

let Predicates = [HasBMI2, NoEGPR] in {
  defm PDEP32 : PdepPext<"pdep", Xi32, X86pdep>, XD, VEX;
  defm PDEP64 : PdepPext<"pdep", Xi64, X86pdep>, XD, REX_W, VEX;
  defm PEXT32 : PdepPext<"pext", Xi32, X86pext>, XS, VEX;
  defm PEXT64 : PdepPext<"pext", Xi64, X86pext>, XS, REX_W, VEX;
}

let Predicates = [HasBMI2, HasEGPR] in {
  defm PDEP32 : PdepPext<"pdep", Xi32, X86pdep, "_EVEX">, XD, EVEX;
  defm PDEP64 : PdepPext<"pdep", Xi64, X86pdep, "_EVEX">, XD, REX_W, EVEX;
  defm PEXT32 : PdepPext<"pext", Xi32, X86pext, "_EVEX">, XS, EVEX;
  defm PEXT64 : PdepPext<"pext", Xi64, X86pext, "_EVEX">, XS, REX_W, EVEX;
}

//===----------------------------------------------------------------------===//
// Lightweight Profiling Instructions

let Predicates = [HasLWP], SchedRW = [WriteSystem] in {

  def LLWPCB : I<0x12, MRM0r, (outs), (ins GR32:$src), "llwpcb\t$src",
                 [(int_x86_llwpcb GR32:$src)]>, XOP, XOP9;
  def SLWPCB : I<0x12, MRM1r, (outs GR32:$dst), (ins), "slwpcb\t$dst",
                 [(set GR32:$dst, (int_x86_slwpcb))]>, XOP, XOP9;

  def LLWPCB64 : I<0x12, MRM0r, (outs), (ins GR64:$src), "llwpcb\t$src",
                   [(int_x86_llwpcb GR64:$src)]>, XOP, XOP9, REX_W;
  def SLWPCB64 : I<0x12, MRM1r, (outs GR64:$dst), (ins), "slwpcb\t$dst",
                   [(set GR64:$dst, (int_x86_slwpcb))]>, XOP, XOP9, REX_W;
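
  // LLWPCB makes the LWP control block at the given address the active one for
  // the current thread; SLWPCB reads back the address of the currently active
  // control block.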

  multiclass lwpins_intr<RegisterClass RC> {
    def rri : Ii32<0x12, MRM0r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
                   "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
                   [(set EFLAGS, (X86lwpins RC:$src0, GR32:$src1, timm:$cntl))]>,
                   XOP, VVVV, XOPA;

    def rmi : Ii32<0x12, MRM0m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
                   "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
                   [(set EFLAGS, (X86lwpins RC:$src0, (loadi32 addr:$src1), timm:$cntl))]>,
                   XOP, VVVV, XOPA;
  }

  let Defs = [EFLAGS] in {
    defm LWPINS32 : lwpins_intr<GR32>;
    defm LWPINS64 : lwpins_intr<GR64>, REX_W;
  }

  multiclass lwpval_intr<RegisterClass RC, Intrinsic Int> {
    def rri : Ii32<0x12, MRM1r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
                   "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
                   [(Int RC:$src0, GR32:$src1, timm:$cntl)]>, XOP, VVVV, XOPA;

    def rmi : Ii32<0x12, MRM1m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
                   "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
                   [(Int RC:$src0, (loadi32 addr:$src1), timm:$cntl)]>,
                   XOP, VVVV, XOPA;
  }

  defm LWPVAL32 : lwpval_intr<GR32, int_x86_lwpval32>;
  defm LWPVAL64 : lwpval_intr<GR64, int_x86_lwpval64>, REX_W;

} // HasLWP, SchedRW

//===----------------------------------------------------------------------===//
// MONITORX/MWAITX Instructions
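// MONITORX arms address monitoring on the address in EAX/RAX, with extensions
// in ECX and hints in EDX; MWAITX waits for a write to the monitored address,
// and EBX can supply an optional timeout.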

let SchedRW = [ WriteSystem ] in {
  let Uses = [ EAX, ECX, EDX ] in
  def MONITORX32rrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
                      TB, Requires<[ HasMWAITX, Not64BitMode ]>;
  let Uses = [ RAX, ECX, EDX ] in
  def MONITORX64rrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
                      TB, Requires<[ HasMWAITX, In64BitMode ]>;

  let Uses = [ ECX, EAX, EBX ] in {
    def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
                      []>, TB, Requires<[ HasMWAITX ]>;
  }
} // SchedRW

//===----------------------------------------------------------------------===//
// WAITPKG Instructions

let SchedRW = [WriteSystem] in {
  def UMONITOR16 : I<0xAE, MRM6r, (outs), (ins GR16:$src),
                     "umonitor\t$src", [(int_x86_umonitor GR16:$src)]>,
                     TB, XS, AdSize16, Requires<[HasWAITPKG, Not64BitMode]>;
  def UMONITOR32 : I<0xAE, MRM6r, (outs), (ins GR32:$src),
                     "umonitor\t$src", [(int_x86_umonitor GR32:$src)]>,
                     TB, XS, AdSize32, Requires<[HasWAITPKG]>;
  def UMONITOR64 : I<0xAE, MRM6r, (outs), (ins GR64:$src),
                     "umonitor\t$src", [(int_x86_umonitor GR64:$src)]>,
                     TB, XS, AdSize64, Requires<[HasWAITPKG, In64BitMode]>;
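
  // UMWAIT and TPAUSE read their TSC-based deadline from EDX:EAX and report
  // their wakeup status in EFLAGS, hence the Uses/Defs below.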
  let Uses = [EAX, EDX], Defs = [EFLAGS] in {
    def UMWAIT : I<0xAE, MRM6r,
                   (outs), (ins GR32orGR64:$src), "umwait\t$src",
                   [(set EFLAGS, (X86umwait GR32orGR64:$src, EDX, EAX))]>,
                   TB, XD, Requires<[HasWAITPKG]>;
    def TPAUSE : I<0xAE, MRM6r,
                   (outs), (ins GR32orGR64:$src), "tpause\t$src",
                   [(set EFLAGS, (X86tpause GR32orGR64:$src, EDX, EAX))]>,
                   TB, PD, Requires<[HasWAITPKG]>;
  }
} // SchedRW

//===----------------------------------------------------------------------===//
// MOVDIRI - Move doubleword/quadword as direct store

let SchedRW = [WriteStore] in {
  def MOVDIRI32 : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                    "movdiri\t{$src, $dst|$dst, $src}",
                    [(int_x86_directstore32 addr:$dst, GR32:$src)]>,
                    T8, Requires<[HasMOVDIRI, NoEGPR]>;
  def MOVDIRI64 : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                     "movdiri\t{$src, $dst|$dst, $src}",
                     [(int_x86_directstore64 addr:$dst, GR64:$src)]>,
                     T8, Requires<[In64BitMode, HasMOVDIRI, NoEGPR]>;
  def MOVDIRI32_EVEX : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                         "movdiri\t{$src, $dst|$dst, $src}",
                         [(int_x86_directstore32 addr:$dst, GR32:$src)]>,
                         EVEX, NoCD8, T_MAP4, Requires<[In64BitMode, HasMOVDIRI, HasEGPR]>;
  def MOVDIRI64_EVEX : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                          "movdiri\t{$src, $dst|$dst, $src}",
                          [(int_x86_directstore64 addr:$dst, GR64:$src)]>,
                          EVEX, NoCD8, T_MAP4, Requires<[In64BitMode, HasMOVDIRI, HasEGPR]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// MOVDIR64B - Move 64 bytes as direct store
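// The destination address is supplied in a general-purpose register and is
// interpreted according to the current address size (hence the separate
// AdSize16/32/64 forms); the 64-byte source is an ordinary memory operand and
// the data is written as a single 64-byte store.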

let SchedRW = [WriteStore] in {
  def MOVDIR64B16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem_GR16:$src),
                      "movdir64b\t{$src, $dst|$dst, $src}", []>,
                      T8, PD, AdSize16, Requires<[HasMOVDIR64B, Not64BitMode]>;
  def MOVDIR64B32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem_GR32:$src),
                      "movdir64b\t{$src, $dst|$dst, $src}",
                      [(int_x86_movdir64b GR32:$dst, addr:$src)]>,
                      T8, PD, AdSize32, Requires<[HasMOVDIR64B, NoEGPR]>;
  def MOVDIR64B64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem_GR64:$src),
                      "movdir64b\t{$src, $dst|$dst, $src}",
                      [(int_x86_movdir64b GR64:$dst, addr:$src)]>,
                      T8, PD, AdSize64, Requires<[HasMOVDIR64B, NoEGPR, In64BitMode]>;
  def MOVDIR64B32_EVEX : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem_GR32:$src),
                           "movdir64b\t{$src, $dst|$dst, $src}",
                           [(int_x86_movdir64b GR32:$dst, addr:$src)]>,
                           EVEX, NoCD8, T_MAP4, PD, AdSize32, Requires<[HasMOVDIR64B, HasEGPR, In64BitMode]>;
  def MOVDIR64B64_EVEX : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem_GR64:$src),
                           "movdir64b\t{$src, $dst|$dst, $src}",
                           [(int_x86_movdir64b GR64:$dst, addr:$src)]>,
                           EVEX, NoCD8, T_MAP4, PD, AdSize64, Requires<[HasMOVDIR64B, HasEGPR, In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// ENQCMD/S - Enqueue 64-byte command as user with 64-byte write atomicity
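// The command data comes from the 64-byte memory source, and the destination
// (enqueue portal) address is taken from the GPR operand. Whether the command
// was accepted is reported in ZF, which is why EFLAGS is defined below.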

multiclass Enqcmds<string suffix> {
  def ENQCMD32#suffix : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem_GR32:$src),
                          "enqcmd\t{$src, $dst|$dst, $src}",
                          [(set EFLAGS, (X86enqcmd GR32:$dst, addr:$src))]>,
                          NoCD8, XD, AdSize32;
  def ENQCMD64#suffix : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem_GR64:$src),
                          "enqcmd\t{$src, $dst|$dst, $src}",
                          [(set EFLAGS, (X86enqcmd GR64:$dst, addr:$src))]>,
                          NoCD8, XD, AdSize64;

  def ENQCMDS32#suffix : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem_GR32:$src),
                           "enqcmds\t{$src, $dst|$dst, $src}",
                           [(set EFLAGS, (X86enqcmds GR32:$dst, addr:$src))]>,
                           NoCD8, XS, AdSize32;
  def ENQCMDS64#suffix : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem_GR64:$src),
                           "enqcmds\t{$src, $dst|$dst, $src}",
                           [(set EFLAGS, (X86enqcmds GR64:$dst, addr:$src))]>,
                           NoCD8, XS, AdSize64;
}

let SchedRW = [WriteStore], Defs = [EFLAGS] in {
  def ENQCMD16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem_GR16:$src),
                   "enqcmd\t{$src, $dst|$dst, $src}",
                   [(set EFLAGS, (X86enqcmd GR16:$dst, addr:$src))]>,
                   T8, XD, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
  def ENQCMDS16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem_GR16:$src),
                    "enqcmds\t{$src, $dst|$dst, $src}",
                    [(set EFLAGS, (X86enqcmds GR16:$dst, addr:$src))]>,
                    T8, XS, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;

  defm "" : Enqcmds<"">, T8, Requires<[HasENQCMD, NoEGPR]>;
  defm "" : Enqcmds<"_EVEX">, EVEX, T_MAP4, Requires<[HasENQCMD, HasEGPR, In64BitMode]>;
}

//===----------------------------------------------------------------------===//
// CLZERO Instruction

let SchedRW = [WriteLoad] in {
  let Uses = [EAX] in
  def CLZERO32r : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
                  TB, Requires<[HasCLZERO, Not64BitMode]>;
  let Uses = [RAX] in
  def CLZERO64r : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
                  TB, Requires<[HasCLZERO, In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// INVLPGB Instruction

let SchedRW = [WriteSystem] in {
  let Uses = [EAX, EDX] in
  def INVLPGB32 : I<0x01, MRM_FE, (outs), (ins),
                    "invlpgb", []>,
                    TB, Requires<[Not64BitMode]>;
  let Uses = [RAX, EDX] in
  def INVLPGB64 : I<0x01, MRM_FE, (outs), (ins),
                    "invlpgb", []>,
                    TB, Requires<[In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// TLBSYNC Instruction

let SchedRW = [WriteSystem] in {
  def TLBSYNC : I<0x01, MRM_FF, (outs), (ins),
                  "tlbsync", []>,
                  TB, Requires<[]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// HRESET Instruction

let Uses = [EAX], SchedRW = [WriteSystem] in
  def HRESET : Ii8<0xF0, MRM_C0, (outs), (ins i32u8imm:$imm), "hreset\t$imm", []>,
               Requires<[HasHRESET]>, TA, XS;

//===----------------------------------------------------------------------===//
// SERIALIZE Instruction

let SchedRW = [WriteSystem] in
  def SERIALIZE : I<0x01, MRM_E8, (outs), (ins), "serialize",
                    [(int_x86_serialize)]>, TB,
                    Requires<[HasSERIALIZE]>;

//===----------------------------------------------------------------------===//
// TSXLDTRK - TSX Suspend Load Address Tracking

let Predicates = [HasTSXLDTRK], SchedRW = [WriteSystem] in {
  def XSUSLDTRK : I<0x01, MRM_E8, (outs), (ins), "xsusldtrk",
                    [(int_x86_xsusldtrk)]>, TB, XD;
  def XRESLDTRK : I<0x01, MRM_E9, (outs), (ins), "xresldtrk",
                    [(int_x86_xresldtrk)]>, TB, XD;
}

//===----------------------------------------------------------------------===//
// UINTR Instructions
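// UIRET returns from a user-interrupt handler, CLUI/STUI clear and set the
// user-interrupt flag, SENDUIPI sends a user IPI selected by its operand, and
// TESTUI copies the user-interrupt flag into CF (hence Defs = [EFLAGS]).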

let Predicates = [HasUINTR, In64BitMode], SchedRW = [WriteSystem] in {
  def UIRET : I<0x01, MRM_EC, (outs), (ins), "uiret",
                []>, TB, XS;
  def CLUI : I<0x01, MRM_EE, (outs), (ins), "clui",
               [(int_x86_clui)]>, TB, XS;
  def STUI : I<0x01, MRM_EF, (outs), (ins), "stui",
               [(int_x86_stui)]>, TB, XS;

  def SENDUIPI : I<0xC7, MRM6r, (outs), (ins GR64:$arg), "senduipi\t$arg",
                   [(int_x86_senduipi GR64:$arg)]>, TB, XS;

  let Defs = [EFLAGS] in
  def TESTUI : I<0x01, MRM_ED, (outs), (ins), "testui",
                 [(set EFLAGS, (X86testui))]>, TB, XS;
}

//===----------------------------------------------------------------------===//
// PREFETCHIT0 and PREFETCHIT1 Instructions
// prefetch ADDR, RW, Locality, Data
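// The prefetch node operands are: rw = 0 (read), locality (3 here selects
// PREFETCHIT0, 2 selects PREFETCHIT1), and a final 0 marking an instruction
// rather than data prefetch.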
let Predicates = [HasPREFETCHI, In64BitMode], SchedRW = [WriteLoad] in {
  def PREFETCHIT0 : I<0x18, MRM7m, (outs), (ins i8mem:$src),
                      "prefetchit0\t$src", [(prefetch addr:$src, (i32 0), (i32 3), (i32 0))]>, TB;
  def PREFETCHIT1 : I<0x18, MRM6m, (outs), (ins i8mem:$src),
                      "prefetchit1\t$src", [(prefetch addr:$src, (i32 0), (i32 2), (i32 0))]>, TB;
}

//===----------------------------------------------------------------------===//
// CMPCCXADD Instructions
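// Each form atomically loads the memory operand, compares it with $dstsrc1 to
// set EFLAGS, stores the sum of the memory value and $src3 back to memory if
// the condition holds, and always returns the original memory value in $dst.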

let isCodeGenOnly = 1, ForceDisassemble = 1, mayLoad = 1, mayStore = 1,
    Defs = [EFLAGS], Constraints = "$dstsrc1 = $dst" in {
  let Predicates = [HasCMPCCXADD, NoEGPR, In64BitMode] in {
    def CMPCCXADDmr32 : I<0xe0, MRMDestMem4VOp3CC, (outs GR32:$dst),
                          (ins GR32:$dstsrc1, i32mem:$dstsrc2, GR32:$src3, ccode:$cond),
                          "cmp${cond}xadd\t{$src3, $dst, $dstsrc2|$dstsrc2, $dst, $src3}",
                          [(set GR32:$dst, (X86cmpccxadd addr:$dstsrc2,
                            GR32:$dstsrc1, GR32:$src3, timm:$cond))]>,
                          VEX, VVVV, T8, PD, Sched<[WriteXCHG]>;

    def CMPCCXADDmr64 : I<0xe0, MRMDestMem4VOp3CC, (outs GR64:$dst),
                          (ins GR64:$dstsrc1, i64mem:$dstsrc2, GR64:$src3, ccode:$cond),
                          "cmp${cond}xadd\t{$src3, $dst, $dstsrc2|$dstsrc2, $dst, $src3}",
                          [(set GR64:$dst, (X86cmpccxadd addr:$dstsrc2,
                            GR64:$dstsrc1, GR64:$src3, timm:$cond))]>,
                          VEX, VVVV, REX_W, T8, PD, Sched<[WriteXCHG]>;
  }

  let Predicates = [HasCMPCCXADD, HasEGPR, In64BitMode] in {
    def CMPCCXADDmr32_EVEX : I<0xe0, MRMDestMem4VOp3CC, (outs GR32:$dst),
                               (ins GR32:$dstsrc1, i32mem:$dstsrc2, GR32:$src3, ccode:$cond),
                               "cmp${cond}xadd\t{$src3, $dst, $dstsrc2|$dstsrc2, $dst, $src3}",
                               [(set GR32:$dst, (X86cmpccxadd addr:$dstsrc2,
                                 GR32:$dstsrc1, GR32:$src3, timm:$cond))]>,
                               EVEX, VVVV, NoCD8, T8, PD, Sched<[WriteXCHG]>;

    def CMPCCXADDmr64_EVEX : I<0xe0, MRMDestMem4VOp3CC, (outs GR64:$dst),
                               (ins GR64:$dstsrc1, i64mem:$dstsrc2, GR64:$src3, ccode:$cond),
                               "cmp${cond}xadd\t{$src3, $dst, $dstsrc2|$dstsrc2, $dst, $src3}",
                               [(set GR64:$dst, (X86cmpccxadd addr:$dstsrc2,
                                 GR64:$dstsrc1, GR64:$src3, timm:$cond))]>,
                               EVEX, VVVV, NoCD8, REX_W, T8, PD, Sched<[WriteXCHG]>;
  }
}

//===----------------------------------------------------------------------===//
// Memory Instructions

let Predicates = [HasCLFLUSHOPT], SchedRW = [WriteLoad] in
  def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
                     "clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, TB, PD;

let Predicates = [HasCLWB], SchedRW = [WriteLoad] in
  def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src",
               [(int_x86_clwb addr:$src)]>, TB, PD;

let Predicates = [HasCLDEMOTE], SchedRW = [WriteLoad] in
  def CLDEMOTE : I<0x1C, MRM0m, (outs), (ins i8mem:$src), "cldemote\t$src",
                   [(int_x86_cldemote addr:$src)]>, TB;

//===----------------------------------------------------------------------===//
// MOVRS Instructions

let SchedRW = [WriteLoad] in {
  let Predicates = [HasMOVRS, NoEGPR, In64BitMode] in {
    def MOVRS8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
                     "movrs{b}\t{$src, $dst|$dst, $src}",
                     [(set GR8:$dst, (int_x86_movrsqi addr:$src))]>, T8;
    def MOVRS16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                      "movrs{w}\t{$src, $dst|$dst, $src}",
                      [(set GR16:$dst, (int_x86_movrshi addr:$src))]>, OpSize16, T8;
    def MOVRS32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                      "movrs{l}\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (int_x86_movrssi addr:$src))]>, OpSize32, T8;
    def MOVRS64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                       "movrs{q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (int_x86_movrsdi addr:$src))]>, T8;
  }

  let Predicates = [HasMOVRS] in
  def PREFETCHRST2 : I<0x18, MRM4m, (outs), (ins i8mem:$src),
                       "prefetchrst2\t$src",
                       [(int_x86_prefetchrs addr:$src)]>, TB;

  let Predicates = [HasMOVRS, HasEGPR, In64BitMode] in {
    def MOVRS8rm_EVEX : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
                          "movrs{b}\t{$src, $dst|$dst, $src}",
                          [(set GR8:$dst, (int_x86_movrsqi addr:$src))]>, EVEX, NoCD8, T_MAP4;
    def MOVRS16rm_EVEX : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                           "movrs{w}\t{$src, $dst|$dst, $src}",
                           [(set GR16:$dst, (int_x86_movrshi addr:$src))]>, EVEX, NoCD8, PD, T_MAP4;
    def MOVRS32rm_EVEX : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                           "movrs{l}\t{$src, $dst|$dst, $src}",
                           [(set GR32:$dst, (int_x86_movrssi addr:$src))]>, EVEX, NoCD8, T_MAP4;
    def MOVRS64rm_EVEX : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                           "movrs{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_movrsdi addr:$src))]>, EVEX, NoCD8, T_MAP4, REX_W;