1 //===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the various pseudo instructions used by the compiler,
10 // as well as Pat patterns used during instruction selection.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // Pattern Matching Support
17 def GetLo32XForm : SDNodeXForm<imm, [{
18 // Transformation function: get the low 32 bits.
19 return getI32Imm((uint32_t)N->getZExtValue(), SDLoc(N));
23 //===----------------------------------------------------------------------===//
24 // Random Pseudo Instructions.
26 // PIC base construction. This expands to code that looks like this:
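//        call  $next_inst
//        popl %destreg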
29 let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
30 SchedRW = [WriteJump] in
31 def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
34 // ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
35 // a stack adjustment and the codegen must know that they may modify the stack
36 // pointer before prolog-epilog rewriting occurs.
37 // Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
38 // sub / add which can clobber EFLAGS.
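// For example, a call passing 16 bytes of stack arguments is bracketed roughly
// as: ADJCALLSTACKDOWN32 16, 0, 0; ...; CALLpcrel32 ...; ADJCALLSTACKUP32 16, 0.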
39 let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP], SchedRW = [WriteALU] in {
40 def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs),
41 (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
42 "#ADJCALLSTACKDOWN", []>, Requires<[NotLP64]>;
43 def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
45 [(X86callseq_end timm:$amt1, timm:$amt2)]>,
48 def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
49 (ADJCALLSTACKDOWN32 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[NotLP64]>;
52 // ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
53 // a stack adjustment and the codegen must know that they may modify the stack
54 // pointer before prolog-epilog rewriting occurs.
55 // Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
56 // sub / add which can clobber EFLAGS.
57 let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP], SchedRW = [WriteALU] in {
58 def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs),
59 (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
60 "#ADJCALLSTACKDOWN", []>, Requires<[IsLP64]>;
61 def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
63 [(X86callseq_end timm:$amt1, timm:$amt2)]>,
66 def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
67 (ADJCALLSTACKDOWN64 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[IsLP64]>;
69 let SchedRW = [WriteSystem] in {
71 // x86-64 va_start lowering magic.
72 let hasSideEffects = 1, mayStore = 1, Defs = [EFLAGS] in {
73 def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
75 (ins GR8:$al, i8mem:$regsavefi, variable_ops),
76 "#VASTART_SAVE_XMM_REGS $al, $regsavefi",
77 [(X86vastart_save_xmm_regs GR8:$al, addr:$regsavefi)]>;
80 let usesCustomInserter = 1, Defs = [EFLAGS] in {
81 // The VAARG_64 and VAARG_X32 pseudo-instructions take the address of the
82 // va_list, and place the address of the next argument into a register.
83 let Defs = [EFLAGS] in {
84 def VAARG_64 : I<0, Pseudo,
86 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
87 "#VAARG_64 $dst, $ap, $size, $mode, $align",
89 (X86vaarg64 addr:$ap, timm:$size, timm:$mode, timm:$align))]>,
90 Requires<[In64BitMode, IsLP64]>;
91 def VAARG_X32 : I<0, Pseudo,
93 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
94 "#VAARG_X32 $dst, $ap, $size, $mode, $align",
96 (X86vaargx32 addr:$ap, timm:$size, timm:$mode, timm:$align))]>,
97 Requires<[In64BitMode, NotLP64]>;
// When using segmented stacks, these are lowered into instructions that first
// check whether the current stacklet has enough free memory. If it does,
// memory is allocated by bumping the stack pointer. Otherwise memory is
// allocated from the heap.
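// The expansion (roughly) compares the requested stack pointer against the
// stacklet limit kept in thread-local storage and, on overflow, calls into the
// segmented-stack runtime to obtain memory instead.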
105 let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
106 def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
107 "# variable sized alloca for segmented stacks",
109 (X86SegAlloca GR32:$size))]>,
112 let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
113 def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
114 "# variable sized alloca for segmented stacks",
116 (X86SegAlloca GR64:$size))]>,
117 Requires<[In64BitMode]>;
119 // To protect against stack clash, dynamic allocation should perform a memory
120 // probe at each page.
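// For example, a multi-page allocation typically expands to a loop that
// touches one byte in every 4 KiB page it allocates, so the OS guard page is
// always hit before the stack pointer moves past it.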
122 let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
123 def PROBED_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
124 "# variable sized alloca with probing",
126 (X86ProbedAlloca GR32:$size))]>,
129 let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
130 def PROBED_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
131 "# variable sized alloca with probing",
133 (X86ProbedAlloca GR64:$size))]>,
134 Requires<[In64BitMode]>;
137 let hasNoSchedulingInfo = 1 in
138 def STACKALLOC_W_PROBING : I<0, Pseudo, (outs), (ins i64imm:$stacksize),
139 "# fixed size alloca with probing",
142 // Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
143 // targets. These calls are needed to probe the stack when allocating more than
144 // 4k bytes in one go. Touching the stack at 4K increments is necessary to
145 // ensure that the guard pages used by the OS virtual memory manager are
146 // allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodeled
// effects (compared to ordinary calls), such as the stack pointer change.
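// Roughly, on Win64 DYN_ALLOCA_64 becomes "mov rax, <size>; call __chkstk;
// sub rsp, rax": __chkstk performs the page-by-page probing and the caller
// then adjusts the stack pointer itself.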
150 let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
151 def DYN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
152 "# dynamic stack allocation",
153 [(X86DynAlloca GR32:$size)]>,
156 let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
157 def DYN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
158 "# dynamic stack allocation",
159 [(X86DynAlloca GR64:$size)]>,
160 Requires<[In64BitMode]>;
163 // These instructions XOR the frame pointer into a GPR. They are used in some
164 // stack protection schemes. These are post-RA pseudos because we only know the
165 // frame register after register allocation.
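// After frame lowering, XOR64_FP becomes e.g. "xorq %rbp, $dst" when RBP is
// the frame register (and likewise XOR32_FP with the 32-bit frame register).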
let Constraints = "$src = $dst", isPseudo = 1, Defs = [EFLAGS] in {
167 def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
168 "xorl\t$$FP, $src", []>,
169 Requires<[NotLP64]>, Sched<[WriteALU]>;
170 def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src),
171 "xorq\t$$FP $src", []>,
172 Requires<[In64BitMode]>, Sched<[WriteALU]>;
175 //===----------------------------------------------------------------------===//
176 // EH Pseudo Instructions
178 let SchedRW = [WriteSystem] in {
179 let isTerminator = 1, isReturn = 1, isBarrier = 1,
180 hasCtrlDep = 1, isCodeGenOnly = 1 in {
181 def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
182 "ret\t#eh_return, addr: $addr",
183 [(X86ehret GR32:$addr)]>, Sched<[WriteJumpLd]>;
187 let isTerminator = 1, isReturn = 1, isBarrier = 1,
188 hasCtrlDep = 1, isCodeGenOnly = 1 in {
189 def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
190 "ret\t#eh_return, addr: $addr",
191 [(X86ehret GR64:$addr)]>, Sched<[WriteJumpLd]>;
195 let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
196 isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1 in {
197 def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET",
200 // CATCHRET needs a custom inserter for SEH.
201 let usesCustomInserter = 1 in
202 def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from),
204 [(catchret bb:$dst, bb:$from)]>;
207 let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
208 usesCustomInserter = 1 in {
209 def EH_SjLj_SetJmp32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
211 [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
212 Requires<[Not64BitMode]>;
213 def EH_SjLj_SetJmp64 : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
215 [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
216 Requires<[In64BitMode]>;
217 let isTerminator = 1 in {
218 def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
219 "#EH_SJLJ_LONGJMP32",
220 [(X86eh_sjlj_longjmp addr:$buf)]>,
221 Requires<[Not64BitMode]>;
222 def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
223 "#EH_SJLJ_LONGJMP64",
224 [(X86eh_sjlj_longjmp addr:$buf)]>,
225 Requires<[In64BitMode]>;
229 let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
230 def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
231 "#EH_SjLj_Setup\t$dst", []>;
235 //===----------------------------------------------------------------------===//
236 // Pseudo instructions used by unwind info.
// Prolog instructions should not be duplicated, because 1) if only some of
// the instructions are duplicated, we would observe prolog instructions after
// the end-prolog instruction, and 2) Windows expects there to be only a single
// prolog (e.g., when checking whether unwinding is happening in the middle of
// a prolog).
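// These pseudos are emitted as the corresponding .seh_* assembler directives,
// e.g. SEH_PushReg -> .seh_pushreg and SEH_StackAlloc -> .seh_stackalloc.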
244 let isPseudo = 1, isMeta = 1, isNotDuplicable = 1, SchedRW = [WriteSystem] in {
245 def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
246 "#SEH_PushReg $reg", []>;
247 def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
248 "#SEH_SaveReg $reg, $dst", []>;
249 def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
250 "#SEH_SaveXMM $reg, $dst", []>;
251 def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
252 "#SEH_StackAlloc $size", []>;
253 def SEH_StackAlign : I<0, Pseudo, (outs), (ins i32imm:$align),
254 "#SEH_StackAlign $align", []>;
255 def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
256 "#SEH_SetFrame $reg, $offset", []>;
257 def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
258 "#SEH_PushFrame $mode", []>;
259 def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
260 "#SEH_EndPrologue", []>;
263 // Epilog instructions:
264 let isPseudo = 1, isMeta = 1, SchedRW = [WriteSystem] in {
265 def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
266 "#SEH_Epilogue", []>;
269 //===----------------------------------------------------------------------===//
270 // Pseudo instructions used by KCFI.
271 //===----------------------------------------------------------------------===//
let Defs = [R10, R11, EFLAGS] in {
274 def KCFI_CHECK : PseudoI<
275 (outs), (ins GR64:$ptr, i32imm:$type), []>, Sched<[]>;
278 //===----------------------------------------------------------------------===//
279 // Pseudo instructions used by address sanitizer.
280 //===----------------------------------------------------------------------===//
let Defs = [R10, R11, EFLAGS] in {
283 def ASAN_CHECK_MEMACCESS : PseudoI<
284 (outs), (ins GR64PLTSafe:$addr, i32imm:$accessinfo),
285 [(int_asan_check_memaccess GR64PLTSafe:$addr, (i32 timm:$accessinfo))]>,
289 //===----------------------------------------------------------------------===//
290 // Pseudo instructions used by segmented stacks.
293 // This is lowered into a RET instruction by MCInstLower. We need
294 // this so that we don't have to have a MachineBasicBlock which ends
295 // with a RET and also has successors.
296 let isPseudo = 1, SchedRW = [WriteJumpLd] in {
297 def MORESTACK_RET: I<0, Pseudo, (outs), (ins), "", []>;
// This instruction is lowered to a RET followed by a MOV. The two
// instructions are not generated any earlier because the verifier would then
// see a MachineBasicBlock ending with a non-terminator.
302 def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), "", []>;
305 //===----------------------------------------------------------------------===//
306 // Alias Instructions
307 //===----------------------------------------------------------------------===//
309 // Alias instruction mapping movr0 to xor.
310 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
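// MOV32r0 is expanded after register allocation to "xorl %reg, %reg", e.g.
// "xorl %eax, %eax", which has a 2-byte encoding and breaks any dependence on
// the previous value of the register.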
311 let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
312 isPseudo = 1, isMoveImm = 1, AddedComplexity = 10 in
313 def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
314 [(set GR32:$dst, 0)]>, Sched<[WriteZero]>;
316 // Other widths can also make use of the 32-bit xor, which may have a smaller
317 // encoding and avoid partial register updates.
318 let AddedComplexity = 10 in {
319 def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
320 def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
321 def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)>;
324 let Predicates = [OptForSize, Not64BitMode],
325 AddedComplexity = 10 in {
326 let SchedRW = [WriteALU] in {
327 // Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
328 // which only require 3 bytes compared to MOV32ri which requires 5.
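// For example, MOV32r1 expands to "xorl %reg, %reg; incl %reg" and MOV32r_1
// to "xorl %reg, %reg; decl %reg".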
329 let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
330 def MOV32r1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
331 [(set GR32:$dst, 1)]>;
332 def MOV32r_1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
333 [(set GR32:$dst, -1)]>;
337 // MOV16ri is 4 bytes, so the instructions above are smaller.
338 def : Pat<(i16 1), (EXTRACT_SUBREG (MOV32r1), sub_16bit)>;
339 def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
342 let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 5,
343 SchedRW = [WriteALU] in {
344 // AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
345 def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
346 [(set GR32:$dst, i32immSExt8:$src)]>,
347 Requires<[OptForMinSize, NotWin64WithoutFP]>;
348 def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
349 [(set GR64:$dst, i64immSExt8:$src)]>,
350 Requires<[OptForMinSize, NotWin64WithoutFP]>;
// Materialize an i64 constant whose top 32 bits are zero. This could
// theoretically use MOV32ri with a SUBREG_TO_REG to represent the
// zero-extension; however, that would make it more difficult to rematerialize.
356 let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
357 isPseudo = 1, SchedRW = [WriteMove] in
358 def MOV32ri64 : I<0, Pseudo, (outs GR64:$dst), (ins i64i32imm:$src), "",
359 [(set GR64:$dst, i64immZExt32:$src)]>;
// This 64-bit pseudo-move can also be used for labels in the x86-64 small code
// model.
363 def mov64imm32 : ComplexPattern<i64, 1, "selectMOV64Imm32", [X86Wrapper]>;
364 def : Pat<(i64 mov64imm32:$src), (MOV32ri64 mov64imm32:$src)>;
366 // Use sbb to materialize carry bit.
367 let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteADC],
368 hasSideEffects = 0 in {
369 // FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
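// SETB_C32r/SETB_C64r are expanded to "sbb %reg, %reg" with the destination
// register replicated into both inputs, materializing 0 or -1 from the carry.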
372 def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "", []>;
373 def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "", []>;
376 //===----------------------------------------------------------------------===//
377 // String Pseudo Instructions
379 let SchedRW = [WriteMicrocoded] in {
380 let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
381 def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins),
382 "{rep;movsb (%esi), %es:(%edi)|rep movsb es:[edi], [esi]}",
383 [(X86rep_movs i8)]>, REP, AdSize32,
385 def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins),
386 "{rep;movsw (%esi), %es:(%edi)|rep movsw es:[edi], [esi]}",
387 [(X86rep_movs i16)]>, REP, AdSize32, OpSize16,
389 def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins),
390 "{rep;movsl (%esi), %es:(%edi)|rep movsd es:[edi], [esi]}",
391 [(X86rep_movs i32)]>, REP, AdSize32, OpSize32,
393 def REP_MOVSQ_32 : RI<0xA5, RawFrm, (outs), (ins),
394 "{rep;movsq (%esi), %es:(%edi)|rep movsq es:[edi], [esi]}",
395 [(X86rep_movs i64)]>, REP, AdSize32,
396 Requires<[NotLP64, In64BitMode]>;
399 let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
400 def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins),
401 "{rep;movsb (%rsi), %es:(%rdi)|rep movsb es:[rdi], [rsi]}",
402 [(X86rep_movs i8)]>, REP, AdSize64,
404 def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins),
405 "{rep;movsw (%rsi), %es:(%rdi)|rep movsw es:[rdi], [rsi]}",
406 [(X86rep_movs i16)]>, REP, AdSize64, OpSize16,
408 def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins),
409 "{rep;movsl (%rsi), %es:(%rdi)|rep movsdi es:[rdi], [rsi]}",
410 [(X86rep_movs i32)]>, REP, AdSize64, OpSize32,
412 def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins),
413 "{rep;movsq (%rsi), %es:(%rdi)|rep movsq es:[rdi], [rsi]}",
414 [(X86rep_movs i64)]>, REP, AdSize64,
418 // FIXME: Should use "(X86rep_stos AL)" as the pattern.
419 let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
420 let Uses = [AL,ECX,EDI] in
421 def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins),
422 "{rep;stosb %al, %es:(%edi)|rep stosb es:[edi], al}",
423 [(X86rep_stos i8)]>, REP, AdSize32,
425 let Uses = [AX,ECX,EDI] in
426 def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins),
427 "{rep;stosw %ax, %es:(%edi)|rep stosw es:[edi], ax}",
428 [(X86rep_stos i16)]>, REP, AdSize32, OpSize16,
430 let Uses = [EAX,ECX,EDI] in
431 def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins),
432 "{rep;stosl %eax, %es:(%edi)|rep stosd es:[edi], eax}",
433 [(X86rep_stos i32)]>, REP, AdSize32, OpSize32,
let Uses = [RAX,ECX,EDI] in
436 def REP_STOSQ_32 : RI<0xAB, RawFrm, (outs), (ins),
437 "{rep;stosq %rax, %es:(%edi)|rep stosq es:[edi], rax}",
438 [(X86rep_stos i64)]>, REP, AdSize32,
439 Requires<[NotLP64, In64BitMode]>;
442 let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
443 let Uses = [AL,RCX,RDI] in
444 def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins),
445 "{rep;stosb %al, %es:(%rdi)|rep stosb es:[rdi], al}",
446 [(X86rep_stos i8)]>, REP, AdSize64,
448 let Uses = [AX,RCX,RDI] in
449 def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins),
450 "{rep;stosw %ax, %es:(%rdi)|rep stosw es:[rdi], ax}",
451 [(X86rep_stos i16)]>, REP, AdSize64, OpSize16,
let Uses = [EAX,RCX,RDI] in
454 def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins),
455 "{rep;stosl %eax, %es:(%rdi)|rep stosd es:[rdi], eax}",
456 [(X86rep_stos i32)]>, REP, AdSize64, OpSize32,
459 let Uses = [RAX,RCX,RDI] in
460 def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins),
461 "{rep;stosq %rax, %es:(%rdi)|rep stosq es:[rdi], rax}",
462 [(X86rep_stos i64)]>, REP, AdSize64,
467 //===----------------------------------------------------------------------===//
468 // Thread Local Storage Instructions
470 let SchedRW = [WriteSystem] in {
473 // All calls clobber the non-callee saved registers. ESP is marked as
474 // a use to prevent stack-pointer assignments that appear immediately
475 // before calls from potentially appearing dead.
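// TLS_addr32 / TLS_base_addr32 are lowered to a call to the __tls_get_addr
// helper (the dynamic TLS access sequence), hence the full call clobber set.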
476 let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
477 ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
478 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
479 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
480 XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
481 Uses = [ESP, SSP] in {
482 def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
484 [(X86tlsaddr tls32addr:$sym)]>,
485 Requires<[Not64BitMode]>;
486 def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
488 [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
489 Requires<[Not64BitMode]>;
492 // All calls clobber the non-callee saved registers. RSP is marked as
493 // a use to prevent stack-pointer assignments that appear immediately
494 // before calls from potentially appearing dead.
495 let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
496 FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
497 ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
498 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
499 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
500 XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
501 Uses = [RSP, SSP] in {
502 def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
504 [(X86tlsaddr tls64addr:$sym)]>,
505 Requires<[In64BitMode, IsLP64]>;
506 def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
508 [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
509 Requires<[In64BitMode, IsLP64]>;
510 def TLS_addrX32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
512 [(X86tlsaddr tls32addr:$sym)]>,
513 Requires<[In64BitMode, NotLP64]>;
514 def TLS_base_addrX32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
515 "# TLS_base_addrX32",
516 [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
517 Requires<[In64BitMode, NotLP64]>;
520 // TLSDESC only clobbers EAX and EFLAGS. ESP is marked as a use to prevent
521 // stack-pointer assignments that appear immediately before calls from
522 // potentially appearing dead.
523 let Defs = [EAX, EFLAGS], Uses = [RSP, SSP] in {
524 def TLS_desc32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
525 "# TLS_desc32", [(X86tlsdesc tls32addr:$sym)]>;
526 def TLS_desc64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
527 "# TLS_desc64", [(X86tlsdesc tls64addr:$sym)]>;
530 // Darwin TLS Support
// For i386, the address of the thunk is passed on the stack; on return, the
// address of the variable is in %eax. %ecx is trashed during the function
// call. All other registers are preserved.
534 let Defs = [EAX, ECX, EFLAGS, DF],
536 usesCustomInserter = 1 in
537 def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
539 [(X86TLSCall addr:$sym)]>,
540 Requires<[Not64BitMode]>;
542 // For x86_64, the address of the thunk is passed in %rdi, but the
// pseudo directly uses the symbol, so do not add an implicit use of
544 // %rdi. The lowering will do the right thing with RDI.
545 // On return the address of the variable is in %rax. All other
546 // registers are preserved.
547 let Defs = [RAX, EFLAGS, DF],
549 usesCustomInserter = 1 in
550 def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
552 [(X86TLSCall addr:$sym)]>,
553 Requires<[In64BitMode]>;
556 //===----------------------------------------------------------------------===//
557 // Conditional Move Pseudo Instructions
559 // CMOV* - Used to implement the SELECT DAG operation. Expanded after
560 // instruction selection into a branch sequence.
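// The custom inserter lowers each CMOV pseudo to, roughly, a conditional
// branch on $cond around a block providing one of the operands, with a PHI in
// the join block selecting between $t and $f to produce $dst.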
561 multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
562 def CMOV#NAME : I<0, Pseudo,
563 (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
564 "#CMOV_"#NAME#" PSEUDO!",
565 [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, timm:$cond,
569 let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
// X86 doesn't have 8-bit conditional moves. Use a custom inserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote;
// however, that requires promoting the operands and can induce additional
// i8 register pressure.
574 defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;
576 let Predicates = [NoCMOV] in {
577 defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
578 defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
579 } // Predicates = [NoCMOV]
// fcmov doesn't handle all possible EFLAGS conditions, so provide a
// pseudo-CMOV fallback for the x87 register classes.
583 let Predicates = [FPStackf32] in
584 defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;
586 let Predicates = [FPStackf64] in
587 defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;
589 defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;
591 let Predicates = [HasMMX] in
592 defm _VR64 : CMOVrr_PSEUDO<VR64, x86mmx>;
594 let Predicates = [HasSSE1,NoAVX512] in
595 defm _FR32 : CMOVrr_PSEUDO<FR32, f32>;
596 let Predicates = [HasSSE2,NoAVX512] in {
597 defm _FR16 : CMOVrr_PSEUDO<FR16, f16>;
598 defm _FR64 : CMOVrr_PSEUDO<FR64, f64>;
600 let Predicates = [HasAVX512] in {
601 defm _FR16X : CMOVrr_PSEUDO<FR16X, f16>;
602 defm _FR32X : CMOVrr_PSEUDO<FR32X, f32>;
603 defm _FR64X : CMOVrr_PSEUDO<FR64X, f64>;
605 let Predicates = [NoVLX] in {
606 defm _VR128 : CMOVrr_PSEUDO<VR128, v2i64>;
607 defm _VR256 : CMOVrr_PSEUDO<VR256, v4i64>;
609 let Predicates = [HasVLX] in {
610 defm _VR128X : CMOVrr_PSEUDO<VR128X, v2i64>;
611 defm _VR256X : CMOVrr_PSEUDO<VR256X, v4i64>;
613 defm _VR512 : CMOVrr_PSEUDO<VR512, v8i64>;
614 defm _VK1 : CMOVrr_PSEUDO<VK1, v1i1>;
615 defm _VK2 : CMOVrr_PSEUDO<VK2, v2i1>;
616 defm _VK4 : CMOVrr_PSEUDO<VK4, v4i1>;
617 defm _VK8 : CMOVrr_PSEUDO<VK8, v8i1>;
618 defm _VK16 : CMOVrr_PSEUDO<VK16, v16i1>;
619 defm _VK32 : CMOVrr_PSEUDO<VK32, v32i1>;
620 defm _VK64 : CMOVrr_PSEUDO<VK64, v64i1>;
621 } // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS]
623 def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
624 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
626 let Predicates = [NoVLX] in {
627 def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
628 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
629 def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
630 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
631 def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
632 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
633 def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
634 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
635 def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
636 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
638 def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
639 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
640 def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
641 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
642 def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
643 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
644 def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
645 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
646 def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
647 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
649 let Predicates = [HasVLX] in {
650 def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
651 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
652 def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
653 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
654 def : Pat<(v8f16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
655 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
656 def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
657 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
658 def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
659 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
660 def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
661 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
663 def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
664 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
665 def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
666 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
667 def : Pat<(v16f16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
668 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
669 def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
670 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
671 def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
672 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
673 def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
674 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
677 def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
678 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
679 def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
680 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
681 def : Pat<(v32f16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
682 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
683 def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
684 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
685 def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
686 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
687 def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
688 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
690 //===----------------------------------------------------------------------===//
691 // Normal-Instructions-With-Lock-Prefix Pseudo Instructions
692 //===----------------------------------------------------------------------===//
694 // FIXME: Use normal instructions and add lock prefix dynamically.
698 let isCodeGenOnly = 1, Defs = [EFLAGS] in
699 def OR32mi8Locked : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$zero),
700 "or{l}\t{$zero, $dst|$dst, $zero}", []>,
701 Requires<[Not64BitMode]>, OpSize32, LOCK,
702 Sched<[WriteALURMW]>;
704 // RegOpc corresponds to the mr version of the instruction
705 // ImmOpc corresponds to the mi version of the instruction
706 // ImmOpc8 corresponds to the mi8 version of the instruction
707 // ImmMod corresponds to the instruction format of the mi and mi8 versions
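// For example, the LOCK_ADD instantiation below passes RegOpc = 0x00 (add mr),
// ImmOpc = 0x80 (add mi), ImmOpc8 = 0x83 (add mi8) and ImmMod = MRM0m (/0).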
708 multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
709 Format ImmMod, SDNode Op, string mnemonic> {
710 let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
711 SchedRW = [WriteALURMW] in {
713 def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
714 RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
715 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
716 !strconcat(mnemonic, "{b}\t",
717 "{$src2, $dst|$dst, $src2}"),
718 [(set EFLAGS, (Op addr:$dst, GR8:$src2))]>, LOCK;
720 def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
721 RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
722 MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
723 !strconcat(mnemonic, "{w}\t",
724 "{$src2, $dst|$dst, $src2}"),
725 [(set EFLAGS, (Op addr:$dst, GR16:$src2))]>,
728 def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
729 RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
730 MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
731 !strconcat(mnemonic, "{l}\t",
732 "{$src2, $dst|$dst, $src2}"),
733 [(set EFLAGS, (Op addr:$dst, GR32:$src2))]>,
736 def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
737 RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
738 MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
739 !strconcat(mnemonic, "{q}\t",
740 "{$src2, $dst|$dst, $src2}"),
741 [(set EFLAGS, (Op addr:$dst, GR64:$src2))]>, LOCK;
// NOTE: These are order-specific: we want the mi8 forms to be listed
// first so that they are slightly preferred to the mi forms.
745 def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
746 ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
747 ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
748 !strconcat(mnemonic, "{w}\t",
749 "{$src2, $dst|$dst, $src2}"),
750 [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))]>,
753 def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
754 ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
755 ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
756 !strconcat(mnemonic, "{l}\t",
757 "{$src2, $dst|$dst, $src2}"),
758 [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))]>,
761 def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
762 ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
763 ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
764 !strconcat(mnemonic, "{q}\t",
765 "{$src2, $dst|$dst, $src2}"),
766 [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))]>,
769 def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
770 ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
771 ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
772 !strconcat(mnemonic, "{b}\t",
773 "{$src2, $dst|$dst, $src2}"),
774 [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))]>, LOCK;
776 def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
777 ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
778 ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
779 !strconcat(mnemonic, "{w}\t",
780 "{$src2, $dst|$dst, $src2}"),
781 [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))]>,
784 def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
785 ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
786 ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
787 !strconcat(mnemonic, "{l}\t",
788 "{$src2, $dst|$dst, $src2}"),
789 [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))]>,
792 def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
793 ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
794 ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
795 !strconcat(mnemonic, "{q}\t",
796 "{$src2, $dst|$dst, $src2}"),
797 [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))]>,
803 defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
804 defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
805 defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
806 defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
807 defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;
809 let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
810 SchedRW = [WriteALURMW] in {
811 let Predicates = [UseIncDec] in {
812 def LOCK_INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
814 [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i8 1)))]>,
816 def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
818 [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i16 1)))]>,
820 def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
822 [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i32 1)))]>,
825 def LOCK_DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
827 [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i8 1)))]>,
829 def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
831 [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i16 1)))]>,
833 def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
835 [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i32 1)))]>,
839 let Predicates = [UseIncDec, In64BitMode] in {
840 def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
842 [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i64 1)))]>,
844 def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
846 [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i64 1)))]>,
851 let Predicates = [UseIncDec] in {
852 // Additional patterns for -1 constant.
853 def : Pat<(X86lock_add addr:$dst, (i8 -1)), (LOCK_DEC8m addr:$dst)>;
854 def : Pat<(X86lock_add addr:$dst, (i16 -1)), (LOCK_DEC16m addr:$dst)>;
855 def : Pat<(X86lock_add addr:$dst, (i32 -1)), (LOCK_DEC32m addr:$dst)>;
856 def : Pat<(X86lock_sub addr:$dst, (i8 -1)), (LOCK_INC8m addr:$dst)>;
857 def : Pat<(X86lock_sub addr:$dst, (i16 -1)), (LOCK_INC16m addr:$dst)>;
858 def : Pat<(X86lock_sub addr:$dst, (i32 -1)), (LOCK_INC32m addr:$dst)>;
861 let Predicates = [UseIncDec, In64BitMode] in {
862 // Additional patterns for -1 constant.
863 def : Pat<(X86lock_add addr:$dst, (i64 -1)), (LOCK_DEC64m addr:$dst)>;
864 def : Pat<(X86lock_sub addr:$dst, (i64 -1)), (LOCK_INC64m addr:$dst)>;
868 def X86LBTest : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
869 SDTCisVT<2, i8>, SDTCisVT<3, i32>]>;
870 def x86bts : SDNode<"X86ISD::LBTS", X86LBTest,
871 [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
872 def x86btc : SDNode<"X86ISD::LBTC", X86LBTest,
873 [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
874 def x86btr : SDNode<"X86ISD::LBTR", X86LBTest,
875 [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
877 def X86LBTestRM : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
880 def x86_rm_bts : SDNode<"X86ISD::LBTS_RM", X86LBTestRM,
881 [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
882 def x86_rm_btc : SDNode<"X86ISD::LBTC_RM", X86LBTestRM,
883 [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
884 def x86_rm_btr : SDNode<"X86ISD::LBTR_RM", X86LBTestRM,
885 [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
888 multiclass ATOMIC_LOGIC_OP<Format Form, string s> {
889 let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
890 SchedRW = [WriteBitTestSetRegRMW] in {
891 def 16m : Ii8<0xBA, Form, (outs), (ins i16mem:$src1, i8imm:$src2),
892 !strconcat(s, "{w}\t{$src2, $src1|$src1, $src2}"),
893 [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 16)))]>,
895 def 32m : Ii8<0xBA, Form, (outs), (ins i32mem:$src1, i8imm:$src2),
896 !strconcat(s, "{l}\t{$src2, $src1|$src1, $src2}"),
897 [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 32)))]>,
899 def 64m : RIi8<0xBA, Form, (outs), (ins i64mem:$src1, i8imm:$src2),
900 !strconcat(s, "{q}\t{$src2, $src1|$src1, $src2}"),
901 [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 64)))]>,
906 multiclass ATOMIC_LOGIC_OP_RM<bits<8> Opc8, string s> {
907 let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
908 SchedRW = [WriteBitTestSetRegRMW] in {
909 def 16rm : I<Opc8, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
910 !strconcat(s, "{w}\t{$src2, $src1|$src1, $src2}"),
911 [(set EFLAGS, (!cast<SDNode>("x86_rm_" # s) addr:$src1, GR16:$src2))]>,
913 def 32rm : I<Opc8, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
914 !strconcat(s, "{l}\t{$src2, $src1|$src1, $src2}"),
915 [(set EFLAGS, (!cast<SDNode>("x86_rm_" # s) addr:$src1, GR32:$src2))]>,
917 def 64rm : RI<Opc8, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
918 !strconcat(s, "{q}\t{$src2, $src1|$src1, $src2}"),
919 [(set EFLAGS, (!cast<SDNode>("x86_rm_" # s) addr:$src1, GR64:$src2))]>,
925 defm LOCK_BTS : ATOMIC_LOGIC_OP<MRM5m, "bts">;
926 defm LOCK_BTC : ATOMIC_LOGIC_OP<MRM7m, "btc">;
927 defm LOCK_BTR : ATOMIC_LOGIC_OP<MRM6m, "btr">;
929 defm LOCK_BTS_RM : ATOMIC_LOGIC_OP_RM<0xAB, "bts">;
930 defm LOCK_BTC_RM : ATOMIC_LOGIC_OP_RM<0xBB, "btc">;
931 defm LOCK_BTR_RM : ATOMIC_LOGIC_OP_RM<0xB3, "btr">;
933 // Atomic compare and swap.
934 multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
935 string mnemonic, SDPatternOperator frag> {
936 let isCodeGenOnly = 1, SchedRW = [WriteCMPXCHGRMW] in {
937 let Defs = [AL, EFLAGS], Uses = [AL] in
938 def NAME#8 : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
939 !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
940 [(frag addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
941 let Defs = [AX, EFLAGS], Uses = [AX] in
942 def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
943 !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
944 [(frag addr:$ptr, GR16:$swap, 2)]>, TB, OpSize16, LOCK;
945 let Defs = [EAX, EFLAGS], Uses = [EAX] in
946 def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
947 !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
948 [(frag addr:$ptr, GR32:$swap, 4)]>, TB, OpSize32, LOCK;
949 let Defs = [RAX, EFLAGS], Uses = [RAX] in
950 def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
951 !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
952 [(frag addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
956 let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
957 Predicates = [HasCX8], SchedRW = [WriteCMPXCHGRMW],
958 isCodeGenOnly = 1, usesCustomInserter = 1 in {
959 def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
961 [(X86cas8 addr:$ptr)]>, TB, LOCK;
964 let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
965 Predicates = [HasCX16,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
966 isCodeGenOnly = 1, mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
967 def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
// This pseudo must be used when the frame uses RBX as
// the base pointer. Indeed, in such a situation RBX is a reserved
// register and the register allocator will ignore any use/def of
// it. In other words, the register allocator will not fix up the clobbering
// of RBX that happens when setting up the arguments for the instruction.
978 // Unlike the actual related instruction, we mark that this one
979 // defines RBX (instead of using RBX).
980 // The rationale is that we will define RBX during the expansion of
981 // the pseudo. The argument feeding RBX is rbx_input.
983 // The additional argument, $rbx_save, is a temporary register used to
984 // save the value of RBX across the actual instruction.
986 // To make sure the register assigned to $rbx_save does not interfere with
987 // the definition of the actual instruction, we use a definition $dst which
988 // is tied to $rbx_save. That way, the live-range of $rbx_save spans across
// the instruction, and we are sure we will have a valid register to restore
// the value of RBX.
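// The expansion is roughly: $rbx_save = COPY RBX; RBX = COPY $rbx_input;
// LCMPXCHG16B $ptr; RBX = COPY $rbx_save (with $dst tied to $rbx_save as
// described above).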
991 let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
992 Predicates = [HasCX16,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
993 isCodeGenOnly = 1, isPseudo = 1,
994 mayLoad = 1, mayStore = 1, hasSideEffects = 0,
995 Constraints = "$rbx_save = $dst" in {
996 def LCMPXCHG16B_SAVE_RBX :
997 I<0, Pseudo, (outs GR64:$dst),
998 (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save), "", []>;
1001 // Pseudo instruction that doesn't read/write RBX. Will be turned into either
1002 // LCMPXCHG16B_SAVE_RBX or LCMPXCHG16B via a custom inserter.
1003 let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RCX, RDX],
1004 Predicates = [HasCX16,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
1005 isCodeGenOnly = 1, isPseudo = 1,
1006 mayLoad = 1, mayStore = 1, hasSideEffects = 0,
1007 usesCustomInserter = 1 in {
1008 def LCMPXCHG16B_NO_RBX :
1009 I<0, Pseudo, (outs), (ins i128mem:$ptr, GR64:$rbx_input), "",
1010 [(X86cas16 addr:$ptr, GR64:$rbx_input)]>;
1013 // This pseudo must be used when the frame uses RBX/EBX as
1014 // the base pointer.
// cf. the comment for LCMPXCHG16B_SAVE_RBX.
1016 let Defs = [EBX], Uses = [ECX, EAX],
1017 Predicates = [HasMWAITX], SchedRW = [WriteSystem],
1018 isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst" in {
1019 def MWAITX_SAVE_RBX :
1020 I<0, Pseudo, (outs GR64:$dst),
1021 (ins GR32:$ebx_input, GR64:$rbx_save),
1026 // Pseudo mwaitx instruction to use for custom insertion.
1027 let Predicates = [HasMWAITX], SchedRW = [WriteSystem],
1028 isCodeGenOnly = 1, isPseudo = 1,
1029 usesCustomInserter = 1 in {
1031 I<0, Pseudo, (outs), (ins GR32:$ecx, GR32:$eax, GR32:$ebx),
1033 [(int_x86_mwaitx GR32:$ecx, GR32:$eax, GR32:$ebx)]>;
1037 defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg", X86cas>;
1039 // Atomic exchange and add
1040 multiclass ATOMIC_RMW_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
1042 let Constraints = "$val = $dst", Defs = [EFLAGS], mayLoad = 1, mayStore = 1,
1043 isCodeGenOnly = 1, SchedRW = [WriteALURMW] in {
1044 def NAME#8 : I<opc8, MRMSrcMem, (outs GR8:$dst),
1045 (ins GR8:$val, i8mem:$ptr),
1046 !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
1048 (!cast<PatFrag>(frag # "_i8") addr:$ptr, GR8:$val))]>;
1049 def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
1050 (ins GR16:$val, i16mem:$ptr),
1051 !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
1054 (!cast<PatFrag>(frag # "_i16") addr:$ptr, GR16:$val))]>,
1056 def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
1057 (ins GR32:$val, i32mem:$ptr),
1058 !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
1061 (!cast<PatFrag>(frag # "_i32") addr:$ptr, GR32:$val))]>,
1063 def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
1064 (ins GR64:$val, i64mem:$ptr),
1065 !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
1068 (!cast<PatFrag>(frag # "_i64") addr:$ptr, GR64:$val))]>;
1072 defm LXADD : ATOMIC_RMW_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add">, TB, LOCK;
1074 /* The following multiclass tries to make sure that in code like
 *    x.store (immediate op x.load(acquire), release)
 * and
 *    x.store (register op x.load(acquire), release)
1078 * an operation directly on memory is generated instead of wasting a register.
1079 * It is not automatic as atomic_store/load are only lowered to MOV instructions
1080 * extremely late to prevent them from being accidentally reordered in the backend
 * (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions).
 */
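// For example, "x.store(x.load(acquire) + 1, release)" on an i32 matches the
// ADD instantiation below and is selected directly as ADD32mi (an "add" on
// memory) rather than a load/add/store sequence.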
1083 multiclass RELEASE_BINOP_MI<string Name, SDNode op> {
1084 def : Pat<(atomic_store_8 (op (atomic_load_8 addr:$dst), (i8 imm:$src)),
1086 (!cast<Instruction>(Name#"8mi") addr:$dst, imm:$src)>;
1087 def : Pat<(atomic_store_16 (op (atomic_load_16 addr:$dst), (i16 imm:$src)),
1089 (!cast<Instruction>(Name#"16mi") addr:$dst, imm:$src)>;
1090 def : Pat<(atomic_store_32 (op (atomic_load_32 addr:$dst), (i32 imm:$src)),
1092 (!cast<Instruction>(Name#"32mi") addr:$dst, imm:$src)>;
1093 def : Pat<(atomic_store_64 (op (atomic_load_64 addr:$dst), (i64immSExt32:$src)),
1095 (!cast<Instruction>(Name#"64mi32") addr:$dst, (i64immSExt32:$src))>;
1096 def : Pat<(atomic_store_8 (op (atomic_load_8 addr:$dst), (i8 GR8:$src)), addr:$dst),
1097 (!cast<Instruction>(Name#"8mr") addr:$dst, GR8:$src)>;
1098 def : Pat<(atomic_store_16 (op (atomic_load_16 addr:$dst), (i16 GR16:$src)),
1100 (!cast<Instruction>(Name#"16mr") addr:$dst, GR16:$src)>;
1101 def : Pat<(atomic_store_32 (op (atomic_load_32 addr:$dst), (i32 GR32:$src)),
1103 (!cast<Instruction>(Name#"32mr") addr:$dst, GR32:$src)>;
1104 def : Pat<(atomic_store_64 (op (atomic_load_64 addr:$dst), (i64 GR64:$src)),
1106 (!cast<Instruction>(Name#"64mr") addr:$dst, GR64:$src)>;
1108 defm : RELEASE_BINOP_MI<"ADD", add>;
1109 defm : RELEASE_BINOP_MI<"AND", and>;
1110 defm : RELEASE_BINOP_MI<"OR", or>;
1111 defm : RELEASE_BINOP_MI<"XOR", xor>;
1112 defm : RELEASE_BINOP_MI<"SUB", sub>;
1114 // Atomic load + floating point patterns.
1115 // FIXME: This could also handle SIMD operations with *ps and *pd instructions.
1116 multiclass ATOMIC_LOAD_FP_BINOP_MI<string Name, SDNode op> {
1117 def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
1118 (!cast<Instruction>(Name#"SSrm") FR32:$src1, addr:$src2)>,
1119 Requires<[UseSSE1]>;
1120 def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
1121 (!cast<Instruction>("V"#Name#"SSrm") FR32:$src1, addr:$src2)>,
1123 def : Pat<(op FR32X:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
1124 (!cast<Instruction>("V"#Name#"SSZrm") FR32X:$src1, addr:$src2)>,
1125 Requires<[HasAVX512]>;
1127 def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
1128 (!cast<Instruction>(Name#"SDrm") FR64:$src1, addr:$src2)>,
Requires<[UseSSE2]>;
1130 def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
1131 (!cast<Instruction>("V"#Name#"SDrm") FR64:$src1, addr:$src2)>,
1133 def : Pat<(op FR64X:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
1134 (!cast<Instruction>("V"#Name#"SDZrm") FR64X:$src1, addr:$src2)>,
1135 Requires<[HasAVX512]>;
1137 defm : ATOMIC_LOAD_FP_BINOP_MI<"ADD", fadd>;
1138 defm : ATOMIC_LOAD_FP_BINOP_MI<"SUB", fsub>;
1139 defm : ATOMIC_LOAD_FP_BINOP_MI<"MUL", fmul>;
1140 defm : ATOMIC_LOAD_FP_BINOP_MI<"DIV", fdiv>;
1142 multiclass RELEASE_UNOP<string Name, dag dag8, dag dag16, dag dag32,
1144 def : Pat<(atomic_store_8 dag8, addr:$dst),
1145 (!cast<Instruction>(Name#8m) addr:$dst)>;
1146 def : Pat<(atomic_store_16 dag16, addr:$dst),
1147 (!cast<Instruction>(Name#16m) addr:$dst)>;
1148 def : Pat<(atomic_store_32 dag32, addr:$dst),
1149 (!cast<Instruction>(Name#32m) addr:$dst)>;
1150 def : Pat<(atomic_store_64 dag64, addr:$dst),
1151 (!cast<Instruction>(Name#64m) addr:$dst)>;
1154 let Predicates = [UseIncDec] in {
1155 defm : RELEASE_UNOP<"INC",
1156 (add (atomic_load_8 addr:$dst), (i8 1)),
1157 (add (atomic_load_16 addr:$dst), (i16 1)),
1158 (add (atomic_load_32 addr:$dst), (i32 1)),
1159 (add (atomic_load_64 addr:$dst), (i64 1))>;
1160 defm : RELEASE_UNOP<"DEC",
1161 (add (atomic_load_8 addr:$dst), (i8 -1)),
1162 (add (atomic_load_16 addr:$dst), (i16 -1)),
1163 (add (atomic_load_32 addr:$dst), (i32 -1)),
1164 (add (atomic_load_64 addr:$dst), (i64 -1))>;
1167 defm : RELEASE_UNOP<"NEG",
1168 (ineg (i8 (atomic_load_8 addr:$dst))),
1169 (ineg (i16 (atomic_load_16 addr:$dst))),
1170 (ineg (i32 (atomic_load_32 addr:$dst))),
1171 (ineg (i64 (atomic_load_64 addr:$dst)))>;
1172 defm : RELEASE_UNOP<"NOT",
1173 (not (i8 (atomic_load_8 addr:$dst))),
1174 (not (i16 (atomic_load_16 addr:$dst))),
1175 (not (i32 (atomic_load_32 addr:$dst))),
1176 (not (i64 (atomic_load_64 addr:$dst)))>;
1178 def : Pat<(atomic_store_8 (i8 imm:$src), addr:$dst),
1179 (MOV8mi addr:$dst, imm:$src)>;
1180 def : Pat<(atomic_store_16 (i16 imm:$src), addr:$dst),
1181 (MOV16mi addr:$dst, imm:$src)>;
1182 def : Pat<(atomic_store_32 (i32 imm:$src), addr:$dst),
1183 (MOV32mi addr:$dst, imm:$src)>;
1184 def : Pat<(atomic_store_64 (i64immSExt32:$src), addr:$dst),
1185 (MOV64mi32 addr:$dst, i64immSExt32:$src)>;
1187 def : Pat<(atomic_store_8 GR8:$src, addr:$dst),
1188 (MOV8mr addr:$dst, GR8:$src)>;
1189 def : Pat<(atomic_store_16 GR16:$src, addr:$dst),
1190 (MOV16mr addr:$dst, GR16:$src)>;
1191 def : Pat<(atomic_store_32 GR32:$src, addr:$dst),
1192 (MOV32mr addr:$dst, GR32:$src)>;
1193 def : Pat<(atomic_store_64 GR64:$src, addr:$dst),
1194 (MOV64mr addr:$dst, GR64:$src)>;
1196 def : Pat<(i8 (atomic_load_8 addr:$src)), (MOV8rm addr:$src)>;
1197 def : Pat<(i16 (atomic_load_16 addr:$src)), (MOV16rm addr:$src)>;
1198 def : Pat<(i32 (atomic_load_32 addr:$src)), (MOV32rm addr:$src)>;
1199 def : Pat<(i64 (atomic_load_64 addr:$src)), (MOV64rm addr:$src)>;
1201 // Floating point loads/stores.
1202 def : Pat<(atomic_store_32 (i32 (bitconvert (f32 FR32:$src))), addr:$dst),
1203 (MOVSSmr addr:$dst, FR32:$src)>, Requires<[UseSSE1]>;
1204 def : Pat<(atomic_store_32 (i32 (bitconvert (f32 FR32:$src))), addr:$dst),
1205 (VMOVSSmr addr:$dst, FR32:$src)>, Requires<[UseAVX]>;
1206 def : Pat<(atomic_store_32 (i32 (bitconvert (f32 FR32:$src))), addr:$dst),
1207 (VMOVSSZmr addr:$dst, FR32:$src)>, Requires<[HasAVX512]>;
1209 def : Pat<(atomic_store_64 (i64 (bitconvert (f64 FR64:$src))), addr:$dst),
1210 (MOVSDmr addr:$dst, FR64:$src)>, Requires<[UseSSE2]>;
1211 def : Pat<(atomic_store_64 (i64 (bitconvert (f64 FR64:$src))), addr:$dst),
1212 (VMOVSDmr addr:$dst, FR64:$src)>, Requires<[UseAVX]>;
1213 def : Pat<(atomic_store_64 (i64 (bitconvert (f64 FR64:$src))), addr:$dst),
1214 (VMOVSDmr addr:$dst, FR64:$src)>, Requires<[HasAVX512]>;
1216 def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
1217 (MOVSSrm_alt addr:$src)>, Requires<[UseSSE1]>;
1218 def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
1219 (VMOVSSrm_alt addr:$src)>, Requires<[UseAVX]>;
1220 def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
1221 (VMOVSSZrm_alt addr:$src)>, Requires<[HasAVX512]>;
1223 def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
1224 (MOVSDrm_alt addr:$src)>, Requires<[UseSSE2]>;
1225 def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
1226 (VMOVSDrm_alt addr:$src)>, Requires<[UseAVX]>;
1227 def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
1228 (VMOVSDZrm_alt addr:$src)>, Requires<[HasAVX512]>;
1230 //===----------------------------------------------------------------------===//
1231 // DAG Pattern Matching Rules
1232 //===----------------------------------------------------------------------===//
1234 // Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
1235 // binary size compared to a regular MOV, but it introduces an unnecessary
1236 // load, so is not suitable for regular or optsize functions.
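// For example, storing an i32 zero becomes "andl $0, (mem)" (AND32mi with a
// sign-extended 8-bit immediate), which is 3 bytes shorter than the
// "movl $0, (mem)" encoding that needs a 32-bit immediate.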
1237 let Predicates = [OptForMinSize] in {
1238 def : Pat<(simple_store (i16 0), addr:$dst), (AND16mi addr:$dst, 0)>;
1239 def : Pat<(simple_store (i32 0), addr:$dst), (AND32mi addr:$dst, 0)>;
1240 def : Pat<(simple_store (i64 0), addr:$dst), (AND64mi32 addr:$dst, 0)>;
1241 def : Pat<(simple_store (i16 -1), addr:$dst), (OR16mi addr:$dst, -1)>;
1242 def : Pat<(simple_store (i32 -1), addr:$dst), (OR32mi addr:$dst, -1)>;
1243 def : Pat<(simple_store (i64 -1), addr:$dst), (OR64mi32 addr:$dst, -1)>;
// In the kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack; the 'imm' predicate of
// MOV64ri32 should accept these.
1249 def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
1250 (MOV64ri32 tconstpool :$dst)>, Requires<[KernelCode]>;
1251 def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
1252 (MOV64ri32 tjumptable :$dst)>, Requires<[KernelCode]>;
1253 def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
1254 (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
1255 def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
1256 (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
1257 def : Pat<(i64 (X86Wrapper mcsym:$dst)),
1258 (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
1259 def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
1260 (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;
// If we have the small code model and -static mode, it is safe to store global
// addresses directly as immediates. FIXME: This is really a hack; the 'imm'
// predicate for MOV64mi32 should handle this sort of thing.
1265 def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
1266 (MOV64mi32 addr:$dst, tconstpool:$src)>,
1267 Requires<[NearData, IsNotPIC]>;
1268 def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
1269 (MOV64mi32 addr:$dst, tjumptable:$src)>,
1270 Requires<[NearData, IsNotPIC]>;
1271 def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
1272 (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
1273 Requires<[NearData, IsNotPIC]>;
1274 def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
1275 (MOV64mi32 addr:$dst, texternalsym:$src)>,
1276 Requires<[NearData, IsNotPIC]>;
1277 def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
1278 (MOV64mi32 addr:$dst, mcsym:$src)>,
1279 Requires<[NearData, IsNotPIC]>;
1280 def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
1281 (MOV64mi32 addr:$dst, tblockaddress:$src)>,
1282 Requires<[NearData, IsNotPIC]>;
1284 def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
1285 def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;
1289 // tls has some funny stuff here...
1290 // This corresponds to movabs $foo@tpoff, %rax
1291 def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
1292 (MOV64ri32 tglobaltlsaddr :$dst)>;
1293 // This corresponds to add $foo@tpoff, %rax
1294 def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
1295 (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
1298 // Direct PC relative function call for small code model. 32-bit displacement
1299 // sign extended to 64-bit.
1300 def : Pat<(X86call (i64 tglobaladdr:$dst)),
1301 (CALL64pcrel32 tglobaladdr:$dst)>;
1302 def : Pat<(X86call (i64 texternalsym:$dst)),
1303 (CALL64pcrel32 texternalsym:$dst)>;
1305 def : Pat<(X86call_rvmarker (i64 tglobaladdr:$rvfunc), (i64 texternalsym:$dst)),
1306 (CALL64pcrel32_RVMARKER tglobaladdr:$rvfunc, texternalsym:$dst)>;
1307 def : Pat<(X86call_rvmarker (i64 tglobaladdr:$rvfunc), (i64 tglobaladdr:$dst)),
1308 (CALL64pcrel32_RVMARKER tglobaladdr:$rvfunc, tglobaladdr:$dst)>;
1311 // Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
1312 // can never use callee-saved registers. That is the purpose of the GR64_TC
1313 // register classes.
// The only volatile register that is never used by the calling convention is
// %r11; in the worst case, a vararg call with 6 arguments occupies every other
// volatile register.
//
// Match an X86tcret that uses fewer than 7 volatile registers.
1319 def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
1320 (TCRETURNri ptr_rc_tailcall:$dst, timm:$off)>,
1321 Requires<[Not64BitMode, NotUseIndirectThunkCalls]>;
1323 // FIXME: This is disabled for 32-bit PIC mode because the global base
1324 // register which is part of the address mode may be assigned a
1325 // callee-saved register.
// Similar to X86tcret_6regs, here we only have 1 register left.
1327 def : Pat<(X86tcret_1reg (load addr:$dst), timm:$off),
1328 (TCRETURNmi addr:$dst, timm:$off)>,
1329 Requires<[Not64BitMode, IsNotPIC, NotUseIndirectThunkCalls]>;
1331 def : Pat<(X86tcret (i32 tglobaladdr:$dst), timm:$off),
1332 (TCRETURNdi tglobaladdr:$dst, timm:$off)>,
1333 Requires<[NotLP64]>;
1335 def : Pat<(X86tcret (i32 texternalsym:$dst), timm:$off),
1336 (TCRETURNdi texternalsym:$dst, timm:$off)>,
1337 Requires<[NotLP64]>;
1339 def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
1340 (TCRETURNri64 ptr_rc_tailcall:$dst, timm:$off)>,
1341 Requires<[In64BitMode, NotUseIndirectThunkCalls]>;
1343 // Don't fold loads into X86tcret requiring more than 6 regs.
1344 // There wouldn't be enough scratch registers for base+index.
1345 def : Pat<(X86tcret_6regs (load addr:$dst), timm:$off),
1346 (TCRETURNmi64 addr:$dst, timm:$off)>,
1347 Requires<[In64BitMode, NotUseIndirectThunkCalls]>;
1349 def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
1350 (INDIRECT_THUNK_TCRETURN64 ptr_rc_tailcall:$dst, timm:$off)>,
1351 Requires<[In64BitMode, UseIndirectThunkCalls]>;
1353 def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
1354 (INDIRECT_THUNK_TCRETURN32 ptr_rc_tailcall:$dst, timm:$off)>,
1355 Requires<[Not64BitMode, UseIndirectThunkCalls]>;
1357 def : Pat<(X86tcret (i64 tglobaladdr:$dst), timm:$off),
1358 (TCRETURNdi64 tglobaladdr:$dst, timm:$off)>,
1359 Requires<[In64BitMode]>;
1361 def : Pat<(X86tcret (i64 texternalsym:$dst), timm:$off),
1362 (TCRETURNdi64 texternalsym:$dst, timm:$off)>,
1363 Requires<[In64BitMode]>;
1365 // Normal calls, with various flavors of addresses.
1366 def : Pat<(X86call (i32 tglobaladdr:$dst)),
1367 (CALLpcrel32 tglobaladdr:$dst)>;
1368 def : Pat<(X86call (i32 texternalsym:$dst)),
1369 (CALLpcrel32 texternalsym:$dst)>;
1370 def : Pat<(X86call (i32 imm:$dst)),
1371 (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;
1375 // TEST R,R is smaller than CMP R,0
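// e.g. "testl %eax, %eax" encodes in 2 bytes, while "cmpl $0, %eax" needs an
// immediate byte (3 bytes in its shortest form); both leave EFLAGS in the
// same state for a comparison against zero.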
1376 def : Pat<(X86cmp GR8:$src1, 0),
1377 (TEST8rr GR8:$src1, GR8:$src1)>;
1378 def : Pat<(X86cmp GR16:$src1, 0),
1379 (TEST16rr GR16:$src1, GR16:$src1)>;
1380 def : Pat<(X86cmp GR32:$src1, 0),
1381 (TEST32rr GR32:$src1, GR32:$src1)>;
1382 def : Pat<(X86cmp GR64:$src1, 0),
1383 (TEST64rr GR64:$src1, GR64:$src1)>;
1385 // zextload bool -> zextload byte
1386 // An i1 is stored in one byte in zero-extended form; the upper bits are
1387 // cleared before the store, so a plain byte load yields the extended value.
1388 def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
1389 def : Pat<(zextloadi16i1 addr:$src),
1390 (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
1391 def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
1392 def : Pat<(zextloadi64i1 addr:$src),
1393 (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
1395 // extload bool -> extload byte
1396 // When extloading from 16-bit and smaller memory locations into 64-bit
1397 // registers, use zero-extending loads so that the entire 64-bit register is
1398 // defined, avoiding partial-register updates.
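// e.g. an i8 extload feeding a 64-bit use becomes "movzbl (%rdi), %eax",
// whose 32-bit write implicitly clears the upper half of %rax, instead of
// "movb (%rdi), %al", which would only update the low byte.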
1400 def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
1401 def : Pat<(extloadi16i1 addr:$src),
1402 (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
1403 def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
1404 def : Pat<(extloadi16i8 addr:$src),
1405 (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
1406 def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
1407 def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
1409 // For other extloads, use subregs, since the high contents of the register are
1410 // defined after an extload.
1411 // NOTE: The extloadi64i32 pattern needs to be first as it will try to form
1412 // 32-bit loads for 4 byte aligned i8/i16 loads.
1413 def : Pat<(extloadi64i32 addr:$src),
1414 (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
1415 def : Pat<(extloadi64i1 addr:$src),
1416 (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
1417 def : Pat<(extloadi64i8 addr:$src),
1418 (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
1419 def : Pat<(extloadi64i16 addr:$src),
1420 (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
1422 // anyext. Define these to do an explicit zero-extend to
1423 // avoid partial-register updates.
1424 def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
1425 (MOVZX32rr8 GR8 :$src), sub_16bit)>;
1426 def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;
1428 // Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.
1429 def : Pat<(i32 (anyext GR16:$src)),
1430 (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;
1432 def : Pat<(i64 (anyext GR8 :$src)),
1433 (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>;
1434 def : Pat<(i64 (anyext GR16:$src)),
1435 (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
1436 def : Pat<(i64 (anyext GR32:$src)),
1437 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;
1439 def : Pat<(i32 (anyext_sdiv GR8:$src)), (MOVSX32rr8 GR8:$src)>;
1441 // In the case of a 32-bit def that is known to implicitly zero-extend,
1442 // we can use a SUBREG_TO_REG.
1443 def : Pat<(i64 (zext def32:$src)),
1444 (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
1445 def : Pat<(i64 (and (anyext def32:$src), 0x00000000FFFFFFFF)),
1446 (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
1448 //===----------------------------------------------------------------------===//
1449 // Pattern match OR as ADD
1450 //===----------------------------------------------------------------------===//
1452 // If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
1453 // 3-addressified into an LEA instruction to avoid copies. However, we also
1454 // want to emit these instructions as an OR at the end of code generation to
1455 // make the generated code easier to read. To do this, we select
1456 // into "disjoint bits" pseudo ops.
1458 // (or x1, x2) -> (add x1, x2) if the two operands are known not to share bits.
1459 // Try this before selecting to OR.
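// For example, when the low bit of %reg is known to be zero, (or %reg, 1)
// selects to ADD32ri_DB below; the two-address pass can then turn the add
// into "leal 1(%reg), %other" when the result must land in a different
// register, and otherwise the pseudo is emitted as an or again.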
1460 let SchedRW = [WriteALU] in {
1462 let isConvertibleToThreeAddress = 1, isPseudo = 1,
1463 Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
1464 let isCommutable = 1 in {
1465 def ADD8rr_DB : I<0, Pseudo, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
1466 "", // orb/addb REG, REG
1467 [(set GR8:$dst, (or_is_add GR8:$src1, GR8:$src2))]>;
1468 def ADD16rr_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1469 "", // orw/addw REG, REG
1470 [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
1471 def ADD32rr_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1472 "", // orl/addl REG, REG
1473 [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
1474 def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1475 "", // orq/addq REG, REG
1476 [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
1477 } // isCommutable
1479 def ADD8ri_DB : I<0, Pseudo,
1480 (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
1481 "", // orb/addb REG, imm8
1482 [(set GR8:$dst, (or_is_add GR8:$src1, imm:$src2))]>;
1483 def ADD16ri_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
1484 "", // orw/addw REG, imm
1485 [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;
1486 def ADD32ri_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
1487 "", // orl/addl REG, imm
1488 [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;
1489 def ADD64ri32_DB : I<0, Pseudo,
1490 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
1491 "", // orq/addq REG, imm
1492 [(set GR64:$dst, (or_is_add GR64:$src1,
1493 i64immSExt32:$src2))]>;
1494 } // isConvertibleToThreeAddress
1495 } // SchedRW
1497 //===----------------------------------------------------------------------===//
1498 // Pattern match XOR as ADD
1499 //===----------------------------------------------------------------------===//
1501 // Prefer to pattern match XOR with min_signed_value as ADD at isel time.
1502 // ADD can be 3-addressified into an LEA instruction to avoid copies.
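// Adding the sign bit and xoring with it give the same result (any carry out
// of the top bit is discarded), so e.g. "xorl $0x80000000, %eax" can instead
// become an add and, when that avoids a copy, an LEA.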
1503 let AddedComplexity = 5 in {
1504 def : Pat<(xor GR8:$src1, -128),
1505 (ADD8ri GR8:$src1, -128)>;
1506 def : Pat<(xor GR16:$src1, -32768),
1507 (ADD16ri GR16:$src1, -32768)>;
1508 def : Pat<(xor GR32:$src1, -2147483648),
1509 (ADD32ri GR32:$src1, -2147483648)>;
1510 }
1512 //===----------------------------------------------------------------------===//
1513 // Some peepholes
1514 //===----------------------------------------------------------------------===//
1516 // Odd encoding trick: -128 fits into an 8-bit immediate field while
1517 // +128 doesn't, so in this special case use a sub instead of an add.
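// e.g. "addl $128, %eax" needs a 32-bit immediate (5 bytes), while the
// equivalent "subl $-128, %eax" fits the sign-extended imm8 form (3 bytes).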
1518 let Predicates = [NoNDD] in {
1519 def : Pat<(add GR16:$src1, 128),
1520 (SUB16ri GR16:$src1, -128)>;
1521 def : Pat<(add GR32:$src1, 128),
1522 (SUB32ri GR32:$src1, -128)>;
1523 def : Pat<(add GR64:$src1, 128),
1524 (SUB64ri32 GR64:$src1, -128)>;
1526 def : Pat<(X86add_flag_nocf GR16:$src1, 128),
1527 (SUB16ri GR16:$src1, -128)>;
1528 def : Pat<(X86add_flag_nocf GR32:$src1, 128),
1529 (SUB32ri GR32:$src1, -128)>;
1530 def : Pat<(X86add_flag_nocf GR64:$src1, 128),
1531 (SUB64ri32 GR64:$src1, -128)>;
1532 }
1533 let Predicates = [HasNDD] in {
1534 def : Pat<(add GR16:$src1, 128),
1535 (SUB16ri_ND GR16:$src1, -128)>;
1536 def : Pat<(add GR32:$src1, 128),
1537 (SUB32ri_ND GR32:$src1, -128)>;
1538 def : Pat<(add GR64:$src1, 128),
1539 (SUB64ri32_ND GR64:$src1, -128)>;
1541 def : Pat<(X86add_flag_nocf GR16:$src1, 128),
1542 (SUB16ri_ND GR16:$src1, -128)>;
1543 def : Pat<(X86add_flag_nocf GR32:$src1, 128),
1544 (SUB32ri_ND GR32:$src1, -128)>;
1545 def : Pat<(X86add_flag_nocf GR64:$src1, 128),
1546 (SUB64ri32_ND GR64:$src1, -128)>;
1547 }
1548 def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
1549 (SUB16mi addr:$dst, -128)>;
1550 def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
1551 (SUB32mi addr:$dst, -128)>;
1552 def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
1553 (SUB64mi32 addr:$dst, -128)>;
1554 let Predicates = [HasNDD] in {
1555 def : Pat<(add (loadi16 addr:$src), 128),
1556 (SUB16mi_ND addr:$src, -128)>;
1557 def : Pat<(add (loadi32 addr:$src), 128),
1558 (SUB32mi_ND addr:$src, -128)>;
1559 def : Pat<(add (loadi64 addr:$src), 128),
1560 (SUB64mi32_ND addr:$src, -128)>;
1561 }
1563 // The same trick applies for 32-bit immediate fields in 64-bit
1564 // instructions.
1565 let Predicates = [NoNDD] in {
1566 def : Pat<(add GR64:$src1, 0x0000000080000000),
1567 (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
1568 def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
1569 (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
1570 }
1571 let Predicates = [HasNDD] in {
1572 def : Pat<(add GR64:$src1, 0x0000000080000000),
1573 (SUB64ri32_ND GR64:$src1, 0xffffffff80000000)>;
1574 def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
1575 (SUB64ri32_ND GR64:$src1, 0xffffffff80000000)>;
1576 }
1577 def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
1578 (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
1579 let Predicates = [HasNDD] in {
1580 def : Pat<(add (loadi64 addr:$src), 0x0000000080000000),
1581 (SUB64mi32_ND addr:$src, 0xffffffff80000000)>;
1582 }
1584 // Depositing a value into an 8/16-bit subreg:
1585 def : Pat<(or (and GR64:$dst, -256),
1586 (i64 (zextloadi8 addr:$src))),
1587 (INSERT_SUBREG (i64 (COPY $dst)), (MOV8rm i8mem:$src), sub_8bit)>;
1589 def : Pat<(or (and GR32:$dst, -256),
1590 (i32 (zextloadi8 addr:$src))),
1591 (INSERT_SUBREG (i32 (COPY $dst)), (MOV8rm i8mem:$src), sub_8bit)>;
1593 def : Pat<(or (and GR64:$dst, -65536),
1594 (i64 (zextloadi16 addr:$src))),
1595 (INSERT_SUBREG (i64 (COPY $dst)), (MOV16rm i16mem:$src), sub_16bit)>;
1597 def : Pat<(or (and GR32:$dst, -65536),
1598 (i32 (zextloadi16 addr:$src))),
1599 (INSERT_SUBREG (i32 (COPY $dst)), (MOV16rm i16mem:$src), sub_16bit)>;
1601 // To avoid needing to materialize an immediate in a register, use a 32-bit and
1602 // with implicit zero-extension instead of a 64-bit and if the immediate has at
1603 // least 32 bits of leading zeros. If in addition the last 32 bits can be
1604 // represented with a sign extension of an 8-bit constant, use that.
1605 // This can also reduce instruction size by eliminating the need for the REX
1606 // prefix.
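// e.g. (and GR64:$x, 0x00000000fffffff0) cannot use the imm32 form of andq
// (the immediate would sign-extend to 0xfffffffffffffff0), but it can become
// "andl $-16, %eax", which is only three bytes and implicitly clears the
// upper half of %rax.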
1608 // AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
1609 let AddedComplexity = 1 in {
1610 let Predicates = [NoNDD] in {
1611 def : Pat<(and GR64:$src, i64immZExt32:$imm),
1612 (SUBREG_TO_REG
1613 (i64 0),
1614 (AND32ri
1615 (EXTRACT_SUBREG GR64:$src, sub_32bit),
1616 (i32 (GetLo32XForm imm:$imm))),
1617 sub_32bit)>;
1618 }
1619 let Predicates = [HasNDD] in {
1620 def : Pat<(and GR64:$src, i64immZExt32:$imm),
1621 (SUBREG_TO_REG
1622 (i64 0),
1623 (AND32ri_ND
1624 (EXTRACT_SUBREG GR64:$src, sub_32bit),
1625 (i32 (GetLo32XForm imm:$imm))),
1626 sub_32bit)>;
1627 }
1628 } // AddedComplexity = 1
1631 // AddedComplexity is needed due to the increased complexity of the
1632 // i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
1633 // the MOVZX patterns keeps them together in the DAGISel tables.
1634 let AddedComplexity = 1 in {
1635 // r & (2^16-1) ==> movz
1636 def : Pat<(and GR32:$src1, 0xffff),
1637 (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
1638 // r & (2^8-1) ==> movz
1639 def : Pat<(and GR32:$src1, 0xff),
1640 (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>;
1641 // r & (2^8-1) ==> movz
1642 def : Pat<(and GR16:$src1, 0xff),
1643 (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)),
1644 sub_16bit)>;
1646 // r & (2^32-1) ==> movz
1647 def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
1648 (SUBREG_TO_REG (i64 0),
1649 (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
1650 sub_32bit)>;
1651 // r & (2^16-1) ==> movz
1652 def : Pat<(and GR64:$src, 0xffff),
1653 (SUBREG_TO_REG (i64 0),
1654 (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
1655 sub_32bit)>;
1656 // r & (2^8-1) ==> movz
1657 def : Pat<(and GR64:$src, 0xff),
1658 (SUBREG_TO_REG (i64 0),
1659 (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
1660 sub_32bit)>;
1661 } // AddedComplexity = 1
1664 // Try to use BTS/BTR/BTC for single-bit operations on the upper 32 bits.
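// Clearing, say, bit 40 of a GR64 with AND would require materializing the
// mask ~(1ull << 40) with a movabsq first; "btrq $40, %reg" does the same in
// one short instruction, hence these patterns when optimizing for size.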
1666 def BTRXForm : SDNodeXForm<imm, [{
1667 // Transformation function: Find the lowest 0.
1668 return getI64Imm((uint8_t)N->getAPIntValue().countr_one(), SDLoc(N));
1669 }]>;
1671 def BTCBTSXForm : SDNodeXForm<imm, [{
1672 // Transformation function: Find the lowest 1.
1673 return getI64Imm((uint8_t)N->getAPIntValue().countr_zero(), SDLoc(N));
1674 }]>;
1676 def BTRMask64 : ImmLeaf<i64, [{
1677 return !isUInt<32>(Imm) && !isInt<32>(Imm) && isPowerOf2_64(~Imm);
1678 }]>;
1680 def BTCBTSMask64 : ImmLeaf<i64, [{
1681 return !isInt<32>(Imm) && isPowerOf2_64(Imm);
1682 }]>;
1684 // For now, only do this when optimizing for size.
1685 let AddedComplexity = 1, Predicates=[OptForSize] in {
1686 def : Pat<(and GR64:$src1, BTRMask64:$mask),
1687 (BTR64ri8 GR64:$src1, (BTRXForm imm:$mask))>;
1688 def : Pat<(or GR64:$src1, BTCBTSMask64:$mask),
1689 (BTS64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
1690 def : Pat<(xor GR64:$src1, BTCBTSMask64:$mask),
1691 (BTC64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
1692 }
1695 // sext_inreg patterns
1696 def : Pat<(sext_inreg GR32:$src, i16),
1697 (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
1698 def : Pat<(sext_inreg GR32:$src, i8),
1699 (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>;
1701 def : Pat<(sext_inreg GR16:$src, i8),
1702 (EXTRACT_SUBREG (MOVSX32rr8 (EXTRACT_SUBREG GR16:$src, sub_8bit)),
1703 sub_16bit)>;
1705 def : Pat<(sext_inreg GR64:$src, i32),
1706 (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
1707 def : Pat<(sext_inreg GR64:$src, i16),
1708 (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
1709 def : Pat<(sext_inreg GR64:$src, i8),
1710 (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
1712 // sext, sext_load, zext, zext_load
1713 def: Pat<(i16 (sext GR8:$src)),
1714 (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
1715 def: Pat<(sextloadi16i8 addr:$src),
1716 (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
1717 def: Pat<(i16 (zext GR8:$src)),
1718 (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
1719 def: Pat<(zextloadi16i8 addr:$src),
1720 (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
1723 def : Pat<(i16 (trunc GR32:$src)),
1724 (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
1725 def : Pat<(i8 (trunc GR32:$src)),
1726 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
1727 sub_8bit)>,
1728 Requires<[Not64BitMode]>;
1729 def : Pat<(i8 (trunc GR16:$src)),
1730 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
1731 sub_8bit)>,
1732 Requires<[Not64BitMode]>;
1733 def : Pat<(i32 (trunc GR64:$src)),
1734 (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
1735 def : Pat<(i16 (trunc GR64:$src)),
1736 (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
1737 def : Pat<(i8 (trunc GR64:$src)),
1738 (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
1739 def : Pat<(i8 (trunc GR32:$src)),
1740 (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
1741 Requires<[In64BitMode]>;
1742 def : Pat<(i8 (trunc GR16:$src)),
1743 (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
1744 Requires<[In64BitMode]>;
1746 def immff00_ffff : ImmLeaf<i32, [{
1747 return Imm >= 0xff00 && Imm <= 0xffff;
1748 }]>;
1750 // h-register tricks
1751 def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
1752 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
1753 Requires<[Not64BitMode]>;
1754 def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
1755 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
1756 Requires<[Not64BitMode]>;
1757 def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
1758 (EXTRACT_SUBREG GR32:$src, sub_8bit_hi)>,
1759 Requires<[Not64BitMode]>;
1760 def : Pat<(srl GR16:$src, (i8 8)),
1761 (EXTRACT_SUBREG
1762 (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
1763 sub_16bit)>;
1764 def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
1765 (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
1766 def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
1767 (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
1768 def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
1769 (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
1770 def : Pat<(srl (and_su GR32:$src, immff00_ffff), (i8 8)),
1771 (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
1773 // h-register tricks.
1774 // For now, be conservative on x86-64 and use an h-register extract only if the
1775 // value is immediately zero-extended or stored, which are somewhat common
1776 // cases. This uses a bunch of code to prevent a register requiring a REX prefix
1777 // from being allocated in the same instruction as the h register, as there's
1778 // currently no way to describe this requirement to the register allocator.
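// The restriction exists because an instruction carrying a REX prefix cannot
// encode AH/BH/CH/DH at all (those encodings then mean SPL/BPL/SIL/DIL), so
// e.g. "(x >> 8) & 0xff" only becomes "movzbl %ah, %ecx" via the _NOREX
// instructions, whose register classes exclude R8-R15.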
1780 // h-register extract and zero-extend.
1781 def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
1782 (SUBREG_TO_REG
1783 (i64 0),
1784 (MOVZX32rr8_NOREX
1785 (EXTRACT_SUBREG GR64:$src, sub_8bit_hi)),
1786 sub_32bit)>;
1787 def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
1788 (SUBREG_TO_REG
1789 (i64 0),
1790 (MOVZX32rr8_NOREX
1791 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
1792 sub_32bit)>;
1793 def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
1794 (SUBREG_TO_REG
1795 (i64 0),
1796 (MOVZX32rr8_NOREX
1797 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
1798 sub_32bit)>;
1800 // h-register extract and store.
1801 def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
1802 (MOV8mr_NOREX
1803 addr:$dst,
1804 (EXTRACT_SUBREG GR64:$src, sub_8bit_hi))>;
1805 def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
1806 (MOV8mr_NOREX
1807 addr:$dst,
1808 (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>,
1809 Requires<[In64BitMode]>;
1810 def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
1811 (MOV8mr_NOREX
1812 addr:$dst,
1813 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>,
1814 Requires<[In64BitMode]>;
1816 // Special pattern to catch the last step of __builtin_parity handling. Our
1817 // goal is to use an xor of an h-register with the corresponding l-register.
1818 // The above patterns would handle this on non 64-bit targets, but for 64-bit
1819 // we need to be more careful. We're using a NOREX instruction here in case
1820 // register allocation fails to keep the two registers together. So we need to
1821 // make sure we can't accidentally mix R8-R15 with an h-register.
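// Roughly, __builtin_parity(x) is lowered so that its last reduction step
// xors the low byte of a 32-bit partial result with its second byte, i.e.
// "xorb %ah, %al" followed by a setnp, so both bytes must come from one
// REX-free register pair such as AL/AH.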
1822 def : Pat<(X86xor_flag (i8 (trunc GR32:$src)),
1823 (i8 (trunc (srl_su GR32:$src, (i8 8))))),
1824 (XOR8rr_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit),
1825 (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
1827 // (shl x, 1) ==> (add x, x)
1828 // Note that if x is undef (immediate or otherwise), we could theoretically
1829 // end up with the two uses of x getting different values, producing a result
1830 // where the least significant bit is not 0. However, the probability of this
1831 // happening is considered low enough that this is officially not a
1832 // "real problem".
1833 let Predicates = [NoNDD] in {
1834 def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>;
1835 def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
1836 def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
1837 def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
1839 let Predicates = [HasNDD] in {
1840 def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr_ND GR8 :$src1, GR8 :$src1)>;
1841 def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr_ND GR16:$src1, GR16:$src1)>;
1842 def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr_ND GR32:$src1, GR32:$src1)>;
1843 def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr_ND GR64:$src1, GR64:$src1)>;
1846 // Shift amount is implicitly masked.
1847 multiclass MaskedShiftAmountPats<SDNode frag> {
1848 // (shift x (and y, 31)) ==> (shift x, y)
1849 // (shift x (and y, 63)) ==> (shift x, y)
1850 let Predicates = [NoNDD] in {
1851 def : Pat<(frag GR8:$src1, (shiftMask32 CL)),
1852 (!cast<Instruction>(NAME # "8rCL") GR8:$src1)>;
1853 def : Pat<(frag GR16:$src1, (shiftMask32 CL)),
1854 (!cast<Instruction>(NAME # "16rCL") GR16:$src1)>;
1855 def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
1856 (!cast<Instruction>(NAME # "32rCL") GR32:$src1)>;
1857 def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
1858 (!cast<Instruction>(NAME # "64rCL") GR64:$src1)>;
1859 }
1860 let Predicates = [HasNDD] in {
1861 def : Pat<(frag GR8:$src1, (shiftMask32 CL)),
1862 (!cast<Instruction>(NAME # "8rCL_ND") GR8:$src1)>;
1863 def : Pat<(frag GR16:$src1, (shiftMask32 CL)),
1864 (!cast<Instruction>(NAME # "16rCL_ND") GR16:$src1)>;
1865 def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
1866 (!cast<Instruction>(NAME # "32rCL_ND") GR32:$src1)>;
1867 def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
1868 (!cast<Instruction>(NAME # "64rCL_ND") GR64:$src1)>;
1869 }
1871 def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask32 CL)), addr:$dst),
1872 (!cast<Instruction>(NAME # "8mCL") addr:$dst)>;
1873 def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask32 CL)), addr:$dst),
1874 (!cast<Instruction>(NAME # "16mCL") addr:$dst)>;
1875 def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
1876 (!cast<Instruction>(NAME # "32mCL") addr:$dst)>;
1877 def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
1878 (!cast<Instruction>(NAME # "64mCL") addr:$dst)>;
1880 let Predicates = [HasNDD] in {
1881 def : Pat<(frag (loadi8 addr:$src), (shiftMask32 CL)),
1882 (!cast<Instruction>(NAME # "8mCL_ND") addr:$src)>;
1883 def : Pat<(frag (loadi16 addr:$src), (shiftMask32 CL)),
1884 (!cast<Instruction>(NAME # "16mCL_ND") addr:$src)>;
1885 def : Pat<(frag (loadi32 addr:$src), (shiftMask32 CL)),
1886 (!cast<Instruction>(NAME # "32mCL_ND") addr:$src)>;
1887 def : Pat<(frag (loadi64 addr:$src), (shiftMask64 CL)),
1888 (!cast<Instruction>(NAME # "64mCL_ND") addr:$src)>;
1889 }
1890 }
1892 defm SHL : MaskedShiftAmountPats<shl>;
1893 defm SHR : MaskedShiftAmountPats<srl>;
1894 defm SAR : MaskedShiftAmountPats<sra>;
1896 // ROL/ROR instructions allow a stronger mask optimization than shift for 8- and
1897 // 16-bit. We can remove a mask of any (bitwidth - 1) on the rotation amount
1898 // because over-rotating produces the same result. This is noted in the Intel
1899 // docs with: "tempCOUNT <- (COUNT & COUNTMASK) MOD SIZE". Masking the rotation
1900 // amount could affect EFLAGS results, but that does not matter because we are
1901 // not tracking flags for these nodes.
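// e.g. a 16-bit rotate by CL and a 16-bit rotate by (CL & 15) always produce
// the same value because the rotation wraps, so the explicit and-mask can be
// dropped here even when it is narrower than the 5-bit mask that the shift
// patterns above require.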
1902 multiclass MaskedRotateAmountPats<SDNode frag> {
1903 // (rot x (and y, BitWidth - 1)) ==> (rot x, y)
1904 let Predicates = [NoNDD] in {
1905 def : Pat<(frag GR8:$src1, (shiftMask8 CL)),
1906 (!cast<Instruction>(NAME # "8rCL") GR8:$src1)>;
1907 def : Pat<(frag GR16:$src1, (shiftMask16 CL)),
1908 (!cast<Instruction>(NAME # "16rCL") GR16:$src1)>;
1909 def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
1910 (!cast<Instruction>(NAME # "32rCL") GR32:$src1)>;
1911 def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
1912 (!cast<Instruction>(NAME # "64rCL") GR64:$src1)>;
1913 }
1914 let Predicates = [HasNDD] in {
1915 def : Pat<(frag GR8:$src1, (shiftMask8 CL)),
1916 (!cast<Instruction>(NAME # "8rCL_ND") GR8:$src1)>;
1917 def : Pat<(frag GR16:$src1, (shiftMask16 CL)),
1918 (!cast<Instruction>(NAME # "16rCL_ND") GR16:$src1)>;
1919 def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
1920 (!cast<Instruction>(NAME # "32rCL_ND") GR32:$src1)>;
1921 def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
1922 (!cast<Instruction>(NAME # "64rCL_ND") GR64:$src1)>;
1923 }
1925 def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask8 CL)), addr:$dst),
1926 (!cast<Instruction>(NAME # "8mCL") addr:$dst)>;
1927 def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask16 CL)), addr:$dst),
1928 (!cast<Instruction>(NAME # "16mCL") addr:$dst)>;
1929 def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
1930 (!cast<Instruction>(NAME # "32mCL") addr:$dst)>;
1931 def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
1932 (!cast<Instruction>(NAME # "64mCL") addr:$dst)>;
1934 let Predicates = [HasNDD] in {
1935 def : Pat<(frag (loadi8 addr:$src), (shiftMask8 CL)),
1936 (!cast<Instruction>(NAME # "8mCL_ND") addr:$src)>;
1937 def : Pat<(frag (loadi16 addr:$src), (shiftMask16 CL)),
1938 (!cast<Instruction>(NAME # "16mCL_ND") addr:$src)>;
1939 def : Pat<(frag (loadi32 addr:$src), (shiftMask32 CL)),
1940 (!cast<Instruction>(NAME # "32mCL_ND") addr:$src)>;
1941 def : Pat<(frag (loadi64 addr:$src), (shiftMask64 CL)),
1942 (!cast<Instruction>(NAME # "64mCL_ND") addr:$src)>;
1943 }
1944 }
1946 defm ROL : MaskedRotateAmountPats<rotl>;
1947 defm ROR : MaskedRotateAmountPats<rotr>;
1949 multiclass MaskedShlrdAmountPats<string suffix, Predicate p> {
1950 let Predicates = [p] in {
1951 // Double "funnel" shift amount is implicitly masked.
1952 // (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y) (NOTE: modulo32)
1953 def : Pat<(X86fshl GR16:$src1, GR16:$src2, (shiftMask32 CL)),
1954 (!cast<Instruction>(SHLD16rrCL#suffix) GR16:$src1, GR16:$src2)>;
1955 def : Pat<(X86fshr GR16:$src2, GR16:$src1, (shiftMask32 CL)),
1956 (!cast<Instruction>(SHRD16rrCL#suffix) GR16:$src1, GR16:$src2)>;
1958 // (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y)
1959 def : Pat<(fshl GR32:$src1, GR32:$src2, (shiftMask32 CL)),
1960 (!cast<Instruction>(SHLD32rrCL#suffix) GR32:$src1, GR32:$src2)>;
1961 def : Pat<(fshr GR32:$src2, GR32:$src1, (shiftMask32 CL)),
1962 (!cast<Instruction>(SHRD32rrCL#suffix) GR32:$src1, GR32:$src2)>;
1964 // (fshl/fshr x (and y, 63)) ==> (fshl/fshr x, y)
1965 def : Pat<(fshl GR64:$src1, GR64:$src2, (shiftMask64 CL)),
1966 (!cast<Instruction>(SHLD64rrCL#suffix) GR64:$src1, GR64:$src2)>;
1967 def : Pat<(fshr GR64:$src2, GR64:$src1, (shiftMask64 CL)),
1968 (!cast<Instruction>(SHRD64rrCL#suffix) GR64:$src1, GR64:$src2)>;
1969 }
1970 }
1972 defm : MaskedShlrdAmountPats<"", NoNDD>;
1973 defm : MaskedShlrdAmountPats<"_ND", HasNDD>;
1975 // Use BTR/BTS/BTC for clearing/setting/toggling a bit in a variable location.
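// e.g. "x &= ~(1 << n)", "x |= 1 << n" and "x ^= 1 << n" with a variable n
// select to btr/bts/btc below; the i8 shift amount is widened with
// INSERT_SUBREG so it can serve as the bit-offset operand.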
1976 multiclass OneBitPats<RegisterClass rc, ValueType vt, Instruction btr,
1977 Instruction bts, Instruction btc, PatFrag mask> {
1978 def : Pat<(and rc:$src1, (rotl -2, GR8:$src2)),
1979 (btr rc:$src1,
1980 (INSERT_SUBREG (vt (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1981 def : Pat<(or rc:$src1, (shl 1, GR8:$src2)),
1982 (bts rc:$src1,
1983 (INSERT_SUBREG (vt (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1984 def : Pat<(xor rc:$src1, (shl 1, GR8:$src2)),
1985 (btc rc:$src1,
1986 (INSERT_SUBREG (vt (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1988 // Similar to above, but removing unneeded masking of the shift amount.
1989 def : Pat<(and rc:$src1, (rotl -2, (mask GR8:$src2))),
1990 (btr rc:$src1,
1991 (INSERT_SUBREG (vt (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1992 def : Pat<(or rc:$src1, (shl 1, (mask GR8:$src2))),
1993 (bts rc:$src1,
1994 (INSERT_SUBREG (vt (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1995 def : Pat<(xor rc:$src1, (shl 1, (mask GR8:$src2))),
1996 (btc rc:$src1,
1997 (INSERT_SUBREG (vt (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1998 }
2000 defm : OneBitPats<GR16, i16, BTR16rr, BTS16rr, BTC16rr, shiftMask16>;
2001 defm : OneBitPats<GR32, i32, BTR32rr, BTS32rr, BTC32rr, shiftMask32>;
2002 defm : OneBitPats<GR64, i64, BTR64rr, BTS64rr, BTC64rr, shiftMask64>;
2004 //===----------------------------------------------------------------------===//
2005 // EFLAGS-defining Patterns
2006 //===----------------------------------------------------------------------===//
2008 multiclass EFLAGSDefiningPats<string suffix, Predicate p> {
2009 let Predicates = [p] in {
2011 def : Pat<(add GR8 :$src1, GR8 :$src2), (!cast<Instruction>(ADD8rr#suffix) GR8 :$src1, GR8 :$src2)>;
2012 def : Pat<(add GR16:$src1, GR16:$src2), (!cast<Instruction>(ADD16rr#suffix) GR16:$src1, GR16:$src2)>;
2013 def : Pat<(add GR32:$src1, GR32:$src2), (!cast<Instruction>(ADD32rr#suffix) GR32:$src1, GR32:$src2)>;
2014 def : Pat<(add GR64:$src1, GR64:$src2), (!cast<Instruction>(ADD64rr#suffix) GR64:$src1, GR64:$src2)>;
2017 def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
2018 (!cast<Instruction>(ADD8rm#suffix) GR8:$src1, addr:$src2)>;
2019 def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
2020 (!cast<Instruction>(ADD16rm#suffix) GR16:$src1, addr:$src2)>;
2021 def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
2022 (!cast<Instruction>(ADD32rm#suffix) GR32:$src1, addr:$src2)>;
2023 def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
2024 (!cast<Instruction>(ADD64rm#suffix) GR64:$src1, addr:$src2)>;
2027 def : Pat<(add GR8 :$src1, imm:$src2), (!cast<Instruction>(ADD8ri#suffix) GR8:$src1 , imm:$src2)>;
2028 def : Pat<(add GR16:$src1, imm:$src2), (!cast<Instruction>(ADD16ri#suffix) GR16:$src1, imm:$src2)>;
2029 def : Pat<(add GR32:$src1, imm:$src2), (!cast<Instruction>(ADD32ri#suffix) GR32:$src1, imm:$src2)>;
2030 def : Pat<(add GR64:$src1, i64immSExt32:$src2), (!cast<Instruction>(ADD64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;
2033 def : Pat<(sub GR8 :$src1, GR8 :$src2), (!cast<Instruction>(SUB8rr#suffix) GR8 :$src1, GR8 :$src2)>;
2034 def : Pat<(sub GR16:$src1, GR16:$src2), (!cast<Instruction>(SUB16rr#suffix) GR16:$src1, GR16:$src2)>;
2035 def : Pat<(sub GR32:$src1, GR32:$src2), (!cast<Instruction>(SUB32rr#suffix) GR32:$src1, GR32:$src2)>;
2036 def : Pat<(sub GR64:$src1, GR64:$src2), (!cast<Instruction>(SUB64rr#suffix) GR64:$src1, GR64:$src2)>;
2039 def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
2040 (!cast<Instruction>(SUB8rm#suffix) GR8:$src1, addr:$src2)>;
2041 def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
2042 (!cast<Instruction>(SUB16rm#suffix) GR16:$src1, addr:$src2)>;
2043 def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
2044 (!cast<Instruction>(SUB32rm#suffix) GR32:$src1, addr:$src2)>;
2045 def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
2046 (!cast<Instruction>(SUB64rm#suffix) GR64:$src1, addr:$src2)>;
2049 def : Pat<(sub GR8:$src1, imm:$src2),
2050 (!cast<Instruction>(SUB8ri#suffix) GR8:$src1, imm:$src2)>;
2051 def : Pat<(sub GR16:$src1, imm:$src2),
2052 (!cast<Instruction>(SUB16ri#suffix) GR16:$src1, imm:$src2)>;
2053 def : Pat<(sub GR32:$src1, imm:$src2),
2054 (!cast<Instruction>(SUB32ri#suffix) GR32:$src1, imm:$src2)>;
2055 def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
2056 (!cast<Instruction>(SUB64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;
2059 def : Pat<(X86sub_flag 0, GR8 :$src), (!cast<Instruction>(NEG8r#suffix) GR8 :$src)>;
2060 def : Pat<(X86sub_flag 0, GR16:$src), (!cast<Instruction>(NEG16r#suffix) GR16:$src)>;
2061 def : Pat<(X86sub_flag 0, GR32:$src), (!cast<Instruction>(NEG32r#suffix) GR32:$src)>;
2062 def : Pat<(X86sub_flag 0, GR64:$src), (!cast<Instruction>(NEG64r#suffix) GR64:$src)>;
2065 def : Pat<(mul GR16:$src1, GR16:$src2),
2066 (!cast<Instruction>(IMUL16rr#suffix) GR16:$src1, GR16:$src2)>;
2067 def : Pat<(mul GR32:$src1, GR32:$src2),
2068 (!cast<Instruction>(IMUL32rr#suffix) GR32:$src1, GR32:$src2)>;
2069 def : Pat<(mul GR64:$src1, GR64:$src2),
2070 (!cast<Instruction>(IMUL64rr#suffix) GR64:$src1, GR64:$src2)>;
2073 def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
2074 (!cast<Instruction>(IMUL16rm#suffix) GR16:$src1, addr:$src2)>;
2075 def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
2076 (!cast<Instruction>(IMUL32rm#suffix) GR32:$src1, addr:$src2)>;
2077 def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
2078 (!cast<Instruction>(IMUL64rm#suffix) GR64:$src1, addr:$src2)>;
2081 def : Pat<(or GR8 :$src1, GR8 :$src2), (!cast<Instruction>(OR8rr#suffix) GR8 :$src1, GR8 :$src2)>;
2082 def : Pat<(or GR16:$src1, GR16:$src2), (!cast<Instruction>(OR16rr#suffix) GR16:$src1, GR16:$src2)>;
2083 def : Pat<(or GR32:$src1, GR32:$src2), (!cast<Instruction>(OR32rr#suffix) GR32:$src1, GR32:$src2)>;
2084 def : Pat<(or GR64:$src1, GR64:$src2), (!cast<Instruction>(OR64rr#suffix) GR64:$src1, GR64:$src2)>;
2087 def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
2088 (!cast<Instruction>(OR8rm#suffix) GR8:$src1, addr:$src2)>;
2089 def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
2090 (!cast<Instruction>(OR16rm#suffix) GR16:$src1, addr:$src2)>;
2091 def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
2092 (!cast<Instruction>(OR32rm#suffix) GR32:$src1, addr:$src2)>;
2093 def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
2094 (!cast<Instruction>(OR64rm#suffix) GR64:$src1, addr:$src2)>;
2097 def : Pat<(or GR8:$src1 , imm:$src2), (!cast<Instruction>(OR8ri#suffix) GR8 :$src1, imm:$src2)>;
2098 def : Pat<(or GR16:$src1, imm:$src2), (!cast<Instruction>(OR16ri#suffix) GR16:$src1, imm:$src2)>;
2099 def : Pat<(or GR32:$src1, imm:$src2), (!cast<Instruction>(OR32ri#suffix) GR32:$src1, imm:$src2)>;
2100 def : Pat<(or GR64:$src1, i64immSExt32:$src2),
2101 (!cast<Instruction>(OR64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;
2104 def : Pat<(xor GR8 :$src1, GR8 :$src2), (!cast<Instruction>(XOR8rr#suffix) GR8 :$src1, GR8 :$src2)>;
2105 def : Pat<(xor GR16:$src1, GR16:$src2), (!cast<Instruction>(XOR16rr#suffix) GR16:$src1, GR16:$src2)>;
2106 def : Pat<(xor GR32:$src1, GR32:$src2), (!cast<Instruction>(XOR32rr#suffix) GR32:$src1, GR32:$src2)>;
2107 def : Pat<(xor GR64:$src1, GR64:$src2), (!cast<Instruction>(XOR64rr#suffix) GR64:$src1, GR64:$src2)>;
2110 def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
2111 (!cast<Instruction>(XOR8rm#suffix) GR8:$src1, addr:$src2)>;
2112 def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
2113 (!cast<Instruction>(XOR16rm#suffix) GR16:$src1, addr:$src2)>;
2114 def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
2115 (!cast<Instruction>(XOR32rm#suffix) GR32:$src1, addr:$src2)>;
2116 def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
2117 (!cast<Instruction>(XOR64rm#suffix) GR64:$src1, addr:$src2)>;
2120 def : Pat<(xor GR8:$src1, imm:$src2),
2121 (!cast<Instruction>(XOR8ri#suffix) GR8:$src1, imm:$src2)>;
2122 def : Pat<(xor GR16:$src1, imm:$src2),
2123 (!cast<Instruction>(XOR16ri#suffix) GR16:$src1, imm:$src2)>;
2124 def : Pat<(xor GR32:$src1, imm:$src2),
2125 (!cast<Instruction>(XOR32ri#suffix) GR32:$src1, imm:$src2)>;
2126 def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
2127 (!cast<Instruction>(XOR64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;
2130 def : Pat<(and GR8 :$src1, GR8 :$src2), (!cast<Instruction>(AND8rr#suffix) GR8 :$src1, GR8 :$src2)>;
2131 def : Pat<(and GR16:$src1, GR16:$src2), (!cast<Instruction>(AND16rr#suffix) GR16:$src1, GR16:$src2)>;
2132 def : Pat<(and GR32:$src1, GR32:$src2), (!cast<Instruction>(AND32rr#suffix) GR32:$src1, GR32:$src2)>;
2133 def : Pat<(and GR64:$src1, GR64:$src2), (!cast<Instruction>(AND64rr#suffix) GR64:$src1, GR64:$src2)>;
2136 def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
2137 (!cast<Instruction>(AND8rm#suffix) GR8:$src1, addr:$src2)>;
2138 def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
2139 (!cast<Instruction>(AND16rm#suffix) GR16:$src1, addr:$src2)>;
2140 def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
2141 (!cast<Instruction>(AND32rm#suffix) GR32:$src1, addr:$src2)>;
2142 def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
2143 (!cast<Instruction>(AND64rm#suffix) GR64:$src1, addr:$src2)>;
2146 def : Pat<(and GR8:$src1, imm:$src2),
2147 (!cast<Instruction>(AND8ri#suffix) GR8:$src1, imm:$src2)>;
2148 def : Pat<(and GR16:$src1, imm:$src2),
2149 (!cast<Instruction>(AND16ri#suffix) GR16:$src1, imm:$src2)>;
2150 def : Pat<(and GR32:$src1, imm:$src2),
2151 (!cast<Instruction>(AND32ri#suffix) GR32:$src1, imm:$src2)>;
2152 def : Pat<(and GR64:$src1, i64immSExt32:$src2),
2153 (!cast<Instruction>(AND64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;
2154 }
2156 // Increment/Decrement reg.
2157 // Do not use INC/DEC if they are slow.
2158 let Predicates = [UseIncDec, p] in {
2159 def : Pat<(add GR8:$src, 1), (!cast<Instruction>(INC8r#suffix) GR8:$src)>;
2160 def : Pat<(add GR16:$src, 1), (!cast<Instruction>(INC16r#suffix) GR16:$src)>;
2161 def : Pat<(add GR32:$src, 1), (!cast<Instruction>(INC32r#suffix) GR32:$src)>;
2162 def : Pat<(add GR64:$src, 1), (!cast<Instruction>(INC64r#suffix) GR64:$src)>;
2163 def : Pat<(add GR8:$src, -1), (!cast<Instruction>(DEC8r#suffix) GR8:$src)>;
2164 def : Pat<(add GR16:$src, -1), (!cast<Instruction>(DEC16r#suffix) GR16:$src)>;
2165 def : Pat<(add GR32:$src, -1), (!cast<Instruction>(DEC32r#suffix) GR32:$src)>;
2166 def : Pat<(add GR64:$src, -1), (!cast<Instruction>(DEC64r#suffix) GR64:$src)>;
2168 def : Pat<(X86add_flag_nocf GR8:$src, -1), (!cast<Instruction>(DEC8r#suffix) GR8:$src)>;
2169 def : Pat<(X86add_flag_nocf GR16:$src, -1), (!cast<Instruction>(DEC16r#suffix) GR16:$src)>;
2170 def : Pat<(X86add_flag_nocf GR32:$src, -1), (!cast<Instruction>(DEC32r#suffix) GR32:$src)>;
2171 def : Pat<(X86add_flag_nocf GR64:$src, -1), (!cast<Instruction>(DEC64r#suffix) GR64:$src)>;
2172 def : Pat<(X86sub_flag_nocf GR8:$src, -1), (!cast<Instruction>(INC8r#suffix) GR8:$src)>;
2173 def : Pat<(X86sub_flag_nocf GR16:$src, -1), (!cast<Instruction>(INC16r#suffix) GR16:$src)>;
2174 def : Pat<(X86sub_flag_nocf GR32:$src, -1), (!cast<Instruction>(INC32r#suffix) GR32:$src)>;
2175 def : Pat<(X86sub_flag_nocf GR64:$src, -1), (!cast<Instruction>(INC64r#suffix) GR64:$src)>;
2177 def : Pat<(or_is_add GR8:$src, 1), (!cast<Instruction>(INC8r#suffix) GR8:$src)>;
2178 def : Pat<(or_is_add GR16:$src, 1), (!cast<Instruction>(INC16r#suffix) GR16:$src)>;
2179 def : Pat<(or_is_add GR32:$src, 1), (!cast<Instruction>(INC32r#suffix) GR32:$src)>;
2180 def : Pat<(or_is_add GR64:$src, 1), (!cast<Instruction>(INC64r#suffix) GR64:$src)>;
2181 }
2182 }
2184 defm : EFLAGSDefiningPats<"", NoNDD>;
2185 defm : EFLAGSDefiningPats<"_ND", HasNDD>;
2187 let Predicates = [HasZU] in {
2188 // zext (mul reg/mem, imm) -> imulzu
2189 def : Pat<(i32 (zext (i16 (mul GR16:$src1, imm:$src2)))),
2190 (SUBREG_TO_REG (i32 0), (IMULZU16rri GR16:$src1, imm:$src2), sub_16bit)>;
2191 def : Pat<(i32 (zext (i16 (mul (loadi16 addr:$src1), imm:$src2)))),
2192 (SUBREG_TO_REG (i32 0), (IMULZU16rmi addr:$src1, imm:$src2), sub_16bit)>;
2193 def : Pat<(i64 (zext (i16 (mul GR16:$src1, imm:$src2)))),
2194 (SUBREG_TO_REG (i64 0), (IMULZU16rri GR16:$src1, imm:$src2), sub_16bit)>;
2195 def : Pat<(i64 (zext (i16 (mul (loadi16 addr:$src1), imm:$src2)))),
2196 (SUBREG_TO_REG (i64 0), (IMULZU16rmi addr:$src1, imm:$src2), sub_16bit)>;
2197 }
2200 def : Pat<(mul GR16:$src1, imm:$src2),
2201 (IMUL16rri GR16:$src1, imm:$src2)>;
2202 def : Pat<(mul GR32:$src1, imm:$src2),
2203 (IMUL32rri GR32:$src1, imm:$src2)>;
2204 def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
2205 (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
2207 // reg = mul mem, imm
2208 def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
2209 (IMUL16rmi addr:$src1, imm:$src2)>;
2210 def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
2211 (IMUL32rmi addr:$src1, imm:$src2)>;
2212 def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
2213 (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
2215 // Bit scan instruction patterns to match explicit zero-undef behavior.
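// BSF leaves its destination undefined when the source is zero, so it can
// only implement the cttz_zero_undef form directly.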
2216 def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
2217 def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
2218 def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
2219 def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
2220 def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
2221 def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
2223 // When HasMOVBE is enabled, it is possible to get a non-legalized
2224 // register-register 16-bit bswap. This maps it to a ROL instruction.
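// Byte-swapping a 16-bit value just exchanges its two bytes, which is exactly
// what "rolw $8, %reg" does.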
2225 let Predicates = [HasMOVBE] in {
2226 def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;