1 //===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the various pseudo instructions used by the compiler,
10 // as well as Pat patterns used during instruction selection.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // Pattern Matching Support
17 def GetLo32XForm : SDNodeXForm<imm, [{
18 // Transformation function: get the low 32 bits.
19 return getI32Imm((uint32_t)N->getZExtValue(), SDLoc(N));
23 //===----------------------------------------------------------------------===//
24 // Random Pseudo Instructions.
26 // PIC base construction. This expands to code that looks like this:
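//   A rough sketch of the emitted sequence (label and register names here are
//   illustrative only):
//        call   next_inst
//      next_inst:
//        popl   %destreg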
29 let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
30 SchedRW = [WriteJump] in
31 def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
34 // ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
35 // a stack adjustment and the codegen must know that they may modify the stack
36 // pointer before prolog-epilog rewriting occurs.
37 // Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
38 // sub / add which can clobber EFLAGS.
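// For example (illustrative): "ADJCALLSTACKDOWN32 16, 0, 0" may be rewritten
// into "subl $16, %esp" and the matching ADJCALLSTACKUP32 into
// "addl $16, %esp", or folded away when no adjustment is actually needed.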
39 let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP], SchedRW = [WriteALU] in {
40 def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs),
41 (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
42 "#ADJCALLSTACKDOWN", []>, Requires<[NotLP64]>;
43 def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
45 [(X86callseq_end timm:$amt1, timm:$amt2)]>,
48 def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
49 (ADJCALLSTACKDOWN32 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[NotLP64]>;
52 // ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
53 // a stack adjustment and the codegen must know that they may modify the stack
54 // pointer before prolog-epilog rewriting occurs.
55 // Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
56 // sub / add which can clobber EFLAGS.
57 let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP], SchedRW = [WriteALU] in {
58 def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs),
59 (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
60 "#ADJCALLSTACKDOWN", []>, Requires<[IsLP64]>;
61 def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
63 [(X86callseq_end timm:$amt1, timm:$amt2)]>,
66 def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
67 (ADJCALLSTACKDOWN64 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[IsLP64]>;
69 let SchedRW = [WriteSystem] in {
71 // x86-64 va_start lowering magic.
72 let hasSideEffects = 1, mayStore = 1, Defs = [EFLAGS] in {
73 def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
75 (ins GR8:$al, i8mem:$regsavefi, variable_ops),
76 "#VASTART_SAVE_XMM_REGS $al, $regsavefi",
77 [(X86vastart_save_xmm_regs GR8:$al, addr:$regsavefi),
81 let usesCustomInserter = 1, Defs = [EFLAGS] in {
82 // The VAARG_64 and VAARG_X32 pseudo-instructions take the address of the
83 // va_list, and place the address of the next argument into a register.
84 let Defs = [EFLAGS] in {
85 def VAARG_64 : I<0, Pseudo,
87 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
88 "#VAARG_64 $dst, $ap, $size, $mode, $align",
90 (X86vaarg64 addr:$ap, timm:$size, timm:$mode, timm:$align)),
91 (implicit EFLAGS)]>, Requires<[In64BitMode, IsLP64]>;
92 def VAARG_X32 : I<0, Pseudo,
94 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
95 "#VAARG_X32 $dst, $ap, $size, $mode, $align",
97 (X86vaargx32 addr:$ap, timm:$size, timm:$mode, timm:$align)),
98 (implicit EFLAGS)]>, Requires<[In64BitMode, NotLP64]>;
101 // When using segmented stacks these are lowered into instructions which first
102 // check if the current stacklet has enough free memory. If it does, memory is
103 // allocated by bumping the stack pointer. Otherwise memory is allocated from the heap.
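// A rough sketch of the expansion (helper and register names are illustrative;
// the details depend on the segmented-stack ABI):
//        lea    -size(%esp), %scratch
//        cmp    <stacklet limit held in TLS>, %scratch
//        jae    .Lfits                  ; enough room: just bump %esp
//        call   <__morestack-style runtime helper>
//   .Lfits:
//        mov    %scratch, %esp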
106 let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
107 def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
108 "# variable sized alloca for segmented stacks",
110 (X86SegAlloca GR32:$size))]>,
113 let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
114 def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
115 "# variable sized alloca for segmented stacks",
117 (X86SegAlloca GR64:$size))]>,
118 Requires<[In64BitMode]>;
120 // To protect against stack clash, dynamic allocation should perform a memory
121 // probe at each page.
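// Illustrative sketch of the probed expansion (page size and registers are
// placeholders): the allocation is carved out in page-sized chunks and every
// new page is touched so the guard page is always hit in order, e.g.
//        sub    $4096, %rsp
//        movb   $0, (%rsp)              ; probe the freshly exposed page
//        ...repeated until the requested size has been allocated...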
123 let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
124 def PROBED_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
125 "# variable sized alloca with probing",
127 (X86ProbedAlloca GR32:$size))]>,
130 let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
131 def PROBED_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
132 "# variable sized alloca with probing",
134 (X86ProbedAlloca GR64:$size))]>,
135 Requires<[In64BitMode]>;
138 let hasNoSchedulingInfo = 1 in
139 def STACKALLOC_W_PROBING : I<0, Pseudo, (outs), (ins i64imm:$stacksize),
140 "# fixed size alloca with probing",
143 // Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
144 // targets. These calls are needed to probe the stack when allocating more than
145 // 4k bytes in one go. Touching the stack at 4K increments is necessary to
146 // ensure that the guard pages used by the OS virtual memory manager are
147 // allocated in correct sequence.
148 // The main point of having a separate instruction is the extra unmodelled
149 // effects (compared to ordinary calls), such as the stack pointer change.
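// For example (illustrative, Win64 flavor; other targets differ): a large
// dynamic allocation is emitted roughly as
//        mov    $size, %rax
//        call   __chkstk                ; probes each page
//        sub    %rax, %rsp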
151 let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
152 def DYN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
153 "# dynamic stack allocation",
154 [(X86DynAlloca GR32:$size)]>,
157 let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
158 def DYN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
159 "# dynamic stack allocation",
160 [(X86DynAlloca GR64:$size)]>,
161 Requires<[In64BitMode]>;
164 // These instructions XOR the frame pointer into a GPR. They are used in some
165 // stack protection schemes. These are post-RA pseudos because we only know the
166 // frame register after register allocation.
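// For example (illustrative): once the frame register is known, XOR64_FP may
// be printed as "xorq %rbp, %rcx", i.e. the frame pointer XORed into $dst.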
167 let Constraints = "$src = $dst", isMoveImm = 1, isPseudo = 1, Defs = [EFLAGS] in {
168 def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
169 "xorl\t$$FP, $src", []>,
170 Requires<[NotLP64]>, Sched<[WriteALU]>;
171 def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src),
172 "xorq\t$$FP $src", []>,
173 Requires<[In64BitMode]>, Sched<[WriteALU]>;
176 //===----------------------------------------------------------------------===//
177 // EH Pseudo Instructions
179 let SchedRW = [WriteSystem] in {
180 let isTerminator = 1, isReturn = 1, isBarrier = 1,
181 hasCtrlDep = 1, isCodeGenOnly = 1 in {
182 def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
183 "ret\t#eh_return, addr: $addr",
184 [(X86ehret GR32:$addr)]>, Sched<[WriteJumpLd]>;
188 let isTerminator = 1, isReturn = 1, isBarrier = 1,
189 hasCtrlDep = 1, isCodeGenOnly = 1 in {
190 def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
191 "ret\t#eh_return, addr: $addr",
192 [(X86ehret GR64:$addr)]>, Sched<[WriteJumpLd]>;
196 let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
197 isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1 in {
198 def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET", [(cleanupret)]>;
200 // CATCHRET needs a custom inserter for SEH.
201 let usesCustomInserter = 1 in
202 def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from),
204 [(catchret bb:$dst, bb:$from)]>;
207 let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
208 usesCustomInserter = 1 in {
209 def EH_SjLj_SetJmp32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
211 [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
212 Requires<[Not64BitMode]>;
213 def EH_SjLj_SetJmp64 : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
215 [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
216 Requires<[In64BitMode]>;
217 let isTerminator = 1 in {
218 def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
219 "#EH_SJLJ_LONGJMP32",
220 [(X86eh_sjlj_longjmp addr:$buf)]>,
221 Requires<[Not64BitMode]>;
222 def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
223 "#EH_SJLJ_LONGJMP64",
224 [(X86eh_sjlj_longjmp addr:$buf)]>,
225 Requires<[In64BitMode]>;
229 let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
230 def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
231 "#EH_SjLj_Setup\t$dst", []>;
235 //===----------------------------------------------------------------------===//
236 // Pseudo instructions used by unwind info.
238 let isPseudo = 1, SchedRW = [WriteSystem] in {
239 def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
240 "#SEH_PushReg $reg", []>;
241 def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
242 "#SEH_SaveReg $reg, $dst", []>;
243 def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
244 "#SEH_SaveXMM $reg, $dst", []>;
245 def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
246 "#SEH_StackAlloc $size", []>;
247 def SEH_StackAlign : I<0, Pseudo, (outs), (ins i32imm:$align),
248 "#SEH_StackAlign $align", []>;
249 def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
250 "#SEH_SetFrame $reg, $offset", []>;
251 def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
252 "#SEH_PushFrame $mode", []>;
253 def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
254 "#SEH_EndPrologue", []>;
255 def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
256 "#SEH_Epilogue", []>;
259 //===----------------------------------------------------------------------===//
260 // Pseudo instructions used by KCFI.
261 //===----------------------------------------------------------------------===//
263 Defs = [R10, R11, EFLAGS] in {
264 def KCFI_CHECK : PseudoI<
265 (outs), (ins GR64:$ptr, i32imm:$type), []>, Sched<[]>;
268 //===----------------------------------------------------------------------===//
269 // Pseudo instructions used by address sanitizer.
270 //===----------------------------------------------------------------------===//
272 Defs = [R10, R11, EFLAGS] in {
273 def ASAN_CHECK_MEMACCESS : PseudoI<
274 (outs), (ins GR64PLTSafe:$addr, i32imm:$accessinfo),
275 [(int_asan_check_memaccess GR64PLTSafe:$addr, (i32 timm:$accessinfo))]>,
279 //===----------------------------------------------------------------------===//
280 // Pseudo instructions used by segmented stacks.
283 // This is lowered into a RET instruction by MCInstLower. We need
284 // this so that we don't have to have a MachineBasicBlock which ends
285 // with a RET and also has successors.
286 let isPseudo = 1, SchedRW = [WriteJumpLd] in {
287 def MORESTACK_RET: I<0, Pseudo, (outs), (ins), "", []>;
289 // This instruction is lowered to a RET followed by a MOV. The two
290 // instructions are not generated at a higher level because the verifier
291 // would then see a MachineBasicBlock ending with a non-terminator.
292 def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), "", []>;
295 //===----------------------------------------------------------------------===//
296 // Alias Instructions
297 //===----------------------------------------------------------------------===//
299 // Alias instruction mapping movr0 to xor.
300 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
301 let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
302 isPseudo = 1, isMoveImm = 1, AddedComplexity = 10 in
303 def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
304 [(set GR32:$dst, 0)]>, Sched<[WriteZero]>;
306 // Other widths can also make use of the 32-bit xor, which may have a smaller
307 // encoding and avoid partial register updates.
308 let AddedComplexity = 10 in {
309 def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
310 def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
311 def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)>;
314 let Predicates = [OptForSize, Not64BitMode],
315 AddedComplexity = 10 in {
316 let SchedRW = [WriteALU] in {
317 // Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
318 // which only require 3 bytes compared to MOV32ri which requires 5.
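// For example (illustrative): MOV32r1 can be expanded to
//        xorl %eax, %eax
//        incl %eax                      ; 2 + 1 = 3 bytes in 32-bit mode
// and MOV32r_1 to the same xor followed by a dec.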
319 let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
320 def MOV32r1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
321 [(set GR32:$dst, 1)]>;
322 def MOV32r_1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
323 [(set GR32:$dst, -1)]>;
327 // MOV16ri is 4 bytes, so the instructions above are smaller.
328 def : Pat<(i16 1), (EXTRACT_SUBREG (MOV32r1), sub_16bit)>;
329 def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
332 let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 5,
333 SchedRW = [WriteALU] in {
334 // AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
335 def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
336 [(set GR32:$dst, i32immSExt8:$src)]>,
337 Requires<[OptForMinSize, NotWin64WithoutFP]>;
338 def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
339 [(set GR64:$dst, i64immSExt8:$src)]>,
340 Requires<[OptForMinSize, NotWin64WithoutFP]>;
343 // Materialize an i64 constant whose top 32 bits are zero. This could theoretically
344 // use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
345 // that would make it more difficult to rematerialize.
346 let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
347 isPseudo = 1, SchedRW = [WriteMove] in
348 def MOV32ri64 : I<0, Pseudo, (outs GR64:$dst), (ins i64i32imm:$src), "",
349 [(set GR64:$dst, i64immZExt32:$src)]>;
351 // This 64-bit pseudo-move can also be used for labels in the x86-64 small code model.
353 def mov64imm32 : ComplexPattern<i64, 1, "selectMOV64Imm32", [X86Wrapper]>;
354 def : Pat<(i64 mov64imm32:$src), (MOV32ri64 mov64imm32:$src)>;
356 // Use sbb to materialize carry bit.
357 let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteADC],
358 hasSideEffects = 0 in {
359 // FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
360 // However, Pat<> can't replicate the destination reg into the inputs of the pattern.
362 def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "", []>;
363 def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "", []>;
366 //===----------------------------------------------------------------------===//
367 // String Pseudo Instructions
369 let SchedRW = [WriteMicrocoded] in {
370 let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
371 def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins),
372 "{rep;movsb (%esi), %es:(%edi)|rep movsb es:[edi], [esi]}",
373 [(X86rep_movs i8)]>, REP, AdSize32,
375 def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins),
376 "{rep;movsw (%esi), %es:(%edi)|rep movsw es:[edi], [esi]}",
377 [(X86rep_movs i16)]>, REP, AdSize32, OpSize16,
379 def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins),
380 "{rep;movsl (%esi), %es:(%edi)|rep movsd es:[edi], [esi]}",
381 [(X86rep_movs i32)]>, REP, AdSize32, OpSize32,
383 def REP_MOVSQ_32 : RI<0xA5, RawFrm, (outs), (ins),
384 "{rep;movsq (%esi), %es:(%edi)|rep movsq es:[edi], [esi]}",
385 [(X86rep_movs i64)]>, REP, AdSize32,
386 Requires<[NotLP64, In64BitMode]>;
389 let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
390 def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins),
391 "{rep;movsb (%rsi), %es:(%rdi)|rep movsb es:[rdi], [rsi]}",
392 [(X86rep_movs i8)]>, REP, AdSize64,
394 def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins),
395 "{rep;movsw (%rsi), %es:(%rdi)|rep movsw es:[rdi], [rsi]}",
396 [(X86rep_movs i16)]>, REP, AdSize64, OpSize16,
398 def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins),
399 "{rep;movsl (%rsi), %es:(%rdi)|rep movsdi es:[rdi], [rsi]}",
400 [(X86rep_movs i32)]>, REP, AdSize64, OpSize32,
402 def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins),
403 "{rep;movsq (%rsi), %es:(%rdi)|rep movsq es:[rdi], [rsi]}",
404 [(X86rep_movs i64)]>, REP, AdSize64,
408 // FIXME: Should use "(X86rep_stos AL)" as the pattern.
409 let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
410 let Uses = [AL,ECX,EDI] in
411 def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins),
412 "{rep;stosb %al, %es:(%edi)|rep stosb es:[edi], al}",
413 [(X86rep_stos i8)]>, REP, AdSize32,
415 let Uses = [AX,ECX,EDI] in
416 def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins),
417 "{rep;stosw %ax, %es:(%edi)|rep stosw es:[edi], ax}",
418 [(X86rep_stos i16)]>, REP, AdSize32, OpSize16,
420 let Uses = [EAX,ECX,EDI] in
421 def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins),
422 "{rep;stosl %eax, %es:(%edi)|rep stosd es:[edi], eax}",
423 [(X86rep_stos i32)]>, REP, AdSize32, OpSize32,
425 let Uses = [RAX,RCX,RDI] in
426 def REP_STOSQ_32 : RI<0xAB, RawFrm, (outs), (ins),
427 "{rep;stosq %rax, %es:(%edi)|rep stosq es:[edi], rax}",
428 [(X86rep_stos i64)]>, REP, AdSize32,
429 Requires<[NotLP64, In64BitMode]>;
432 let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
433 let Uses = [AL,RCX,RDI] in
434 def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins),
435 "{rep;stosb %al, %es:(%rdi)|rep stosb es:[rdi], al}",
436 [(X86rep_stos i8)]>, REP, AdSize64,
438 let Uses = [AX,RCX,RDI] in
439 def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins),
440 "{rep;stosw %ax, %es:(%rdi)|rep stosw es:[rdi], ax}",
441 [(X86rep_stos i16)]>, REP, AdSize64, OpSize16,
443 let Uses = [RAX,RCX,RDI] in
444 def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins),
445 "{rep;stosl %eax, %es:(%rdi)|rep stosd es:[rdi], eax}",
446 [(X86rep_stos i32)]>, REP, AdSize64, OpSize32,
449 let Uses = [RAX,RCX,RDI] in
450 def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins),
451 "{rep;stosq %rax, %es:(%rdi)|rep stosq es:[rdi], rax}",
452 [(X86rep_stos i64)]>, REP, AdSize64,
457 //===----------------------------------------------------------------------===//
458 // Thread Local Storage Instructions
460 let SchedRW = [WriteSystem] in {
463 // All calls clobber the non-callee saved registers. ESP is marked as
464 // a use to prevent stack-pointer assignments that appear immediately
465 // before calls from potentially appearing dead.
466 let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
467 ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
468 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
469 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
470 XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
471 usesCustomInserter = 1, Uses = [ESP, SSP] in {
472 def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
474 [(X86tlsaddr tls32addr:$sym)]>,
475 Requires<[Not64BitMode]>;
476 def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
478 [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
479 Requires<[Not64BitMode]>;
482 // All calls clobber the non-callee saved registers. RSP is marked as
483 // a use to prevent stack-pointer assignments that appear immediately
484 // before calls from potentially appearing dead.
485 let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
486 FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
487 ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
488 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
489 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
490 XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
491 usesCustomInserter = 1, Uses = [RSP, SSP] in {
492 def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
494 [(X86tlsaddr tls64addr:$sym)]>,
495 Requires<[In64BitMode, IsLP64]>;
496 def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
498 [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
499 Requires<[In64BitMode, IsLP64]>;
500 def TLS_addrX32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
502 [(X86tlsaddr tls32addr:$sym)]>,
503 Requires<[In64BitMode, NotLP64]>;
504 def TLS_base_addrX32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
505 "# TLS_base_addrX32",
506 [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
507 Requires<[In64BitMode, NotLP64]>;
510 // Darwin TLS Support
511 // For i386, the address of the thunk is passed on the stack; on return, the
512 // address of the variable is in %eax. %ecx is trashed during the function
513 // call. All other registers are preserved.
514 let Defs = [EAX, ECX, EFLAGS, DF],
516 usesCustomInserter = 1 in
517 def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
519 [(X86TLSCall addr:$sym)]>,
520 Requires<[Not64BitMode]>;
522 // For x86_64, the address of the thunk is passed in %rdi, but the
523 // pseudo directly uses the symbol, so do not add an implicit use of
524 // %rdi. The lowering will do the right thing with RDI.
525 // On return the address of the variable is in %rax. All other
526 // registers are preserved.
527 let Defs = [RAX, EFLAGS, DF],
529 usesCustomInserter = 1 in
530 def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
532 [(X86TLSCall addr:$sym)]>,
533 Requires<[In64BitMode]>;
536 //===----------------------------------------------------------------------===//
537 // Conditional Move Pseudo Instructions
539 // CMOV* - Used to implement the SELECT DAG operation. Expanded after
540 // instruction selection into a branch sequence.
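// Illustrative sketch of that expansion (block layout is decided by the custom
// inserter): a conditional branch selects between a block providing $t and one
// providing $f, and a PHI in the join block merges them into $dst.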
541 multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
542 def CMOV#NAME : I<0, Pseudo,
543 (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
544 "#CMOV_"#NAME#" PSEUDO!",
545 [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, timm:$cond,
549 let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
550 // X86 doesn't have 8-bit conditional moves. Use a customInserter to
551 // emit control flow. An alternative to this is to mark i8 SELECT as Promote;
552 // however, that requires promoting the operands and can induce additional
553 // i8 register pressure.
554 defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;
556 let Predicates = [NoCMOV] in {
557 defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
558 defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
559 } // Predicates = [NoCMOV]
561 // fcmov doesn't handle all possible EFLAGS; provide a fallback if there is no CMOV instruction.
563 let Predicates = [FPStackf32] in
564 defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;
566 let Predicates = [FPStackf64] in
567 defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;
569 defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;
571 let Predicates = [HasMMX] in
572 defm _VR64 : CMOVrr_PSEUDO<VR64, x86mmx>;
574 let Predicates = [HasSSE1,NoAVX512] in
575 defm _FR32 : CMOVrr_PSEUDO<FR32, f32>;
576 let Predicates = [HasSSE2,NoAVX512] in {
577 defm _FR16 : CMOVrr_PSEUDO<FR16, f16>;
578 defm _FR64 : CMOVrr_PSEUDO<FR64, f64>;
580 let Predicates = [HasAVX512] in {
581 defm _FR16X : CMOVrr_PSEUDO<FR16X, f16>;
582 defm _FR32X : CMOVrr_PSEUDO<FR32X, f32>;
583 defm _FR64X : CMOVrr_PSEUDO<FR64X, f64>;
585 let Predicates = [NoVLX] in {
586 defm _VR128 : CMOVrr_PSEUDO<VR128, v2i64>;
587 defm _VR256 : CMOVrr_PSEUDO<VR256, v4i64>;
589 let Predicates = [HasVLX] in {
590 defm _VR128X : CMOVrr_PSEUDO<VR128X, v2i64>;
591 defm _VR256X : CMOVrr_PSEUDO<VR256X, v4i64>;
593 defm _VR512 : CMOVrr_PSEUDO<VR512, v8i64>;
594 defm _VK1 : CMOVrr_PSEUDO<VK1, v1i1>;
595 defm _VK2 : CMOVrr_PSEUDO<VK2, v2i1>;
596 defm _VK4 : CMOVrr_PSEUDO<VK4, v4i1>;
597 defm _VK8 : CMOVrr_PSEUDO<VK8, v8i1>;
598 defm _VK16 : CMOVrr_PSEUDO<VK16, v16i1>;
599 defm _VK32 : CMOVrr_PSEUDO<VK32, v32i1>;
600 defm _VK64 : CMOVrr_PSEUDO<VK64, v64i1>;
601 } // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS]
603 def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
604 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
606 let Predicates = [NoVLX] in {
607 def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
608 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
609 def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
610 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
611 def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
612 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
613 def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
614 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
615 def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
616 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
618 def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
619 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
620 def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
621 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
622 def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
623 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
624 def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
625 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
626 def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
627 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
629 let Predicates = [HasVLX] in {
630 def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
631 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
632 def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
633 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
634 def : Pat<(v8f16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
635 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
636 def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
637 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
638 def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
639 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
640 def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
641 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
643 def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
644 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
645 def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
646 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
647 def : Pat<(v16f16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
648 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
649 def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
650 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
651 def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
652 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
653 def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
654 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
657 def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
658 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
659 def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
660 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
661 def : Pat<(v32f16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
662 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
663 def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
664 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
665 def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
666 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
667 def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
668 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
670 //===----------------------------------------------------------------------===//
671 // Normal-Instructions-With-Lock-Prefix Pseudo Instructions
672 //===----------------------------------------------------------------------===//
674 // FIXME: Use normal instructions and add lock prefix dynamically.
678 let isCodeGenOnly = 1, Defs = [EFLAGS] in
679 def OR32mi8Locked : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$zero),
680 "or{l}\t{$zero, $dst|$dst, $zero}", []>,
681 Requires<[Not64BitMode]>, OpSize32, LOCK,
682 Sched<[WriteALURMW]>;
684 // RegOpc corresponds to the mr version of the instruction
685 // ImmOpc corresponds to the mi version of the instruction
686 // ImmOpc8 corresponds to the mi8 version of the instruction
687 // ImmMod corresponds to the instruction format of the mi and mi8 versions
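// For example (illustrative): the "defm LOCK_ADD" instantiation below produces
// LOCK_ADD8mr, LOCK_ADD32mi8, LOCK_ADD64mi32, etc., which assemble to forms
// such as "lock addl $1, (%rdi)".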
688 multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
689 Format ImmMod, SDNode Op, string mnemonic> {
690 let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
691 SchedRW = [WriteALURMW] in {
693 def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
694 RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
695 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
696 !strconcat(mnemonic, "{b}\t",
697 "{$src2, $dst|$dst, $src2}"),
698 [(set EFLAGS, (Op addr:$dst, GR8:$src2))]>, LOCK;
700 def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
701 RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
702 MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
703 !strconcat(mnemonic, "{w}\t",
704 "{$src2, $dst|$dst, $src2}"),
705 [(set EFLAGS, (Op addr:$dst, GR16:$src2))]>,
708 def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
709 RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
710 MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
711 !strconcat(mnemonic, "{l}\t",
712 "{$src2, $dst|$dst, $src2}"),
713 [(set EFLAGS, (Op addr:$dst, GR32:$src2))]>,
716 def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
717 RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
718 MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
719 !strconcat(mnemonic, "{q}\t",
720 "{$src2, $dst|$dst, $src2}"),
721 [(set EFLAGS, (Op addr:$dst, GR64:$src2))]>, LOCK;
723 // NOTE: These are order-specific: we want the mi8 forms to be listed
724 // first so that they are slightly preferred to the mi forms.
725 def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
726 ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
727 ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
728 !strconcat(mnemonic, "{w}\t",
729 "{$src2, $dst|$dst, $src2}"),
730 [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))]>,
733 def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
734 ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
735 ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
736 !strconcat(mnemonic, "{l}\t",
737 "{$src2, $dst|$dst, $src2}"),
738 [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))]>,
741 def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
742 ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
743 ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
744 !strconcat(mnemonic, "{q}\t",
745 "{$src2, $dst|$dst, $src2}"),
746 [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))]>,
749 def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
750 ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
751 ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
752 !strconcat(mnemonic, "{b}\t",
753 "{$src2, $dst|$dst, $src2}"),
754 [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))]>, LOCK;
756 def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
757 ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
758 ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
759 !strconcat(mnemonic, "{w}\t",
760 "{$src2, $dst|$dst, $src2}"),
761 [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))]>,
764 def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
765 ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
766 ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
767 !strconcat(mnemonic, "{l}\t",
768 "{$src2, $dst|$dst, $src2}"),
769 [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))]>,
772 def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
773 ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
774 ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
775 !strconcat(mnemonic, "{q}\t",
776 "{$src2, $dst|$dst, $src2}"),
777 [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))]>,
783 defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
784 defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
785 defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
786 defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
787 defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;
789 def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs),
790 (X86lock_add node:$lhs, node:$rhs), [{
791 return hasNoCarryFlagUses(SDValue(N, 0));
794 def X86lock_sub_nocf : PatFrag<(ops node:$lhs, node:$rhs),
795 (X86lock_sub node:$lhs, node:$rhs), [{
796 return hasNoCarryFlagUses(SDValue(N, 0));
799 let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
800 SchedRW = [WriteALURMW] in {
801 let Predicates = [UseIncDec] in {
802 def LOCK_INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
804 [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i8 1)))]>,
806 def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
808 [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i16 1)))]>,
810 def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
812 [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i32 1)))]>,
815 def LOCK_DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
817 [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i8 1)))]>,
819 def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
821 [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i16 1)))]>,
823 def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
825 [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i32 1)))]>,
829 let Predicates = [UseIncDec, In64BitMode] in {
830 def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
832 [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i64 1)))]>,
834 def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
836 [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i64 1)))]>,
841 let Predicates = [UseIncDec] in {
842 // Additional patterns for -1 constant.
843 def : Pat<(X86lock_add addr:$dst, (i8 -1)), (LOCK_DEC8m addr:$dst)>;
844 def : Pat<(X86lock_add addr:$dst, (i16 -1)), (LOCK_DEC16m addr:$dst)>;
845 def : Pat<(X86lock_add addr:$dst, (i32 -1)), (LOCK_DEC32m addr:$dst)>;
846 def : Pat<(X86lock_sub addr:$dst, (i8 -1)), (LOCK_INC8m addr:$dst)>;
847 def : Pat<(X86lock_sub addr:$dst, (i16 -1)), (LOCK_INC16m addr:$dst)>;
848 def : Pat<(X86lock_sub addr:$dst, (i32 -1)), (LOCK_INC32m addr:$dst)>;
851 let Predicates = [UseIncDec, In64BitMode] in {
852 // Additional patterns for -1 constant.
853 def : Pat<(X86lock_add addr:$dst, (i64 -1)), (LOCK_DEC64m addr:$dst)>;
854 def : Pat<(X86lock_sub addr:$dst, (i64 -1)), (LOCK_INC64m addr:$dst)>;
858 def X86LBTest : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
859 SDTCisVT<2, i8>, SDTCisVT<3, i32>]>;
860 def x86bts : SDNode<"X86ISD::LBTS", X86LBTest,
861 [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
862 def x86btc : SDNode<"X86ISD::LBTC", X86LBTest,
863 [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
864 def x86btr : SDNode<"X86ISD::LBTR", X86LBTest,
865 [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
867 def X86LBTestRM : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
870 def x86_rm_bts : SDNode<"X86ISD::LBTS_RM", X86LBTestRM,
871 [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
872 def x86_rm_btc : SDNode<"X86ISD::LBTC_RM", X86LBTestRM,
873 [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
874 def x86_rm_btr : SDNode<"X86ISD::LBTR_RM", X86LBTestRM,
875 [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
878 multiclass ATOMIC_LOGIC_OP<Format Form, string s> {
879 let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
880 SchedRW = [WriteBitTestSetRegRMW] in {
881 def 16m : Ii8<0xBA, Form, (outs), (ins i16mem:$src1, i8imm:$src2),
882 !strconcat(s, "{w}\t{$src2, $src1|$src1, $src2}"),
883 [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 16)))]>,
885 def 32m : Ii8<0xBA, Form, (outs), (ins i32mem:$src1, i8imm:$src2),
886 !strconcat(s, "{l}\t{$src2, $src1|$src1, $src2}"),
887 [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 32)))]>,
889 def 64m : RIi8<0xBA, Form, (outs), (ins i64mem:$src1, i8imm:$src2),
890 !strconcat(s, "{q}\t{$src2, $src1|$src1, $src2}"),
891 [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 64)))]>,
896 multiclass ATOMIC_LOGIC_OP_RM<bits<8> Opc8, string s> {
897 let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
898 SchedRW = [WriteBitTestSetRegRMW] in {
899 def 16rm : I<Opc8, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
900 !strconcat(s, "{w}\t{$src2, $src1|$src1, $src2}"),
901 [(set EFLAGS, (!cast<SDNode>("x86_rm_" # s) addr:$src1, GR16:$src2))]>,
903 def 32rm : I<Opc8, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
904 !strconcat(s, "{l}\t{$src2, $src1|$src1, $src2}"),
905 [(set EFLAGS, (!cast<SDNode>("x86_rm_" # s) addr:$src1, GR32:$src2))]>,
907 def 64rm : RI<Opc8, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
908 !strconcat(s, "{q}\t{$src2, $src1|$src1, $src2}"),
909 [(set EFLAGS, (!cast<SDNode>("x86_rm_" # s) addr:$src1, GR64:$src2))]>,
915 defm LOCK_BTS : ATOMIC_LOGIC_OP<MRM5m, "bts">;
916 defm LOCK_BTC : ATOMIC_LOGIC_OP<MRM7m, "btc">;
917 defm LOCK_BTR : ATOMIC_LOGIC_OP<MRM6m, "btr">;
919 defm LOCK_BTS_RM : ATOMIC_LOGIC_OP_RM<0xAB, "bts">;
920 defm LOCK_BTC_RM : ATOMIC_LOGIC_OP_RM<0xBB, "btc">;
921 defm LOCK_BTR_RM : ATOMIC_LOGIC_OP_RM<0xB3, "btr">;
923 // Atomic compare and swap.
924 multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
925 string mnemonic, SDPatternOperator frag> {
926 let isCodeGenOnly = 1, SchedRW = [WriteCMPXCHGRMW] in {
927 let Defs = [AL, EFLAGS], Uses = [AL] in
928 def NAME#8 : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
929 !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
930 [(frag addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
931 let Defs = [AX, EFLAGS], Uses = [AX] in
932 def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
933 !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
934 [(frag addr:$ptr, GR16:$swap, 2)]>, TB, OpSize16, LOCK;
935 let Defs = [EAX, EFLAGS], Uses = [EAX] in
936 def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
937 !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
938 [(frag addr:$ptr, GR32:$swap, 4)]>, TB, OpSize32, LOCK;
939 let Defs = [RAX, EFLAGS], Uses = [RAX] in
940 def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
941 !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
942 [(frag addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
946 let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
947 Predicates = [HasCX8], SchedRW = [WriteCMPXCHGRMW],
948 isCodeGenOnly = 1, usesCustomInserter = 1 in {
949 def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
951 [(X86cas8 addr:$ptr)]>, TB, LOCK;
954 let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
955 Predicates = [HasCX16,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
956 isCodeGenOnly = 1, mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
957 def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
962 // This pseudo must be used when the frame uses RBX as
963 // the base pointer. Indeed, in such a situation RBX is a reserved
964 // register and the register allocator will ignore any use/def of
965 // it. In other words, the allocator will not fix the clobbering of
966 // RBX that will happen when setting the arguments for the instruction.
968 // Unlike the actual related instruction, we mark that this one
969 // defines RBX (instead of using RBX).
970 // The rationale is that we will define RBX during the expansion of
971 // the pseudo. The argument feeding RBX is rbx_input.
973 // The additional argument, $rbx_save, is a temporary register used to
974 // save the value of RBX across the actual instruction.
976 // To make sure the register assigned to $rbx_save does not interfere with
977 // the definition of the actual instruction, we use a definition $dst which
978 // is tied to $rbx_save. That way, the live-range of $rbx_save spans across
979 // the instruction and we are sure we will have a valid register to restore the value of RBX.
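// Illustrative sketch of the expansion (the actual registers are only chosen
// when the pseudo is expanded):
//        mov    %rbx, <rbx_save reg>
//        mov    <rbx_input reg>, %rbx
//        lock cmpxchg16b ($ptr)
//        mov    <rbx_save reg>, %rbx    ; restore the frame's base pointer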
981 let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
982 Predicates = [HasCX16,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
983 isCodeGenOnly = 1, isPseudo = 1,
984 mayLoad = 1, mayStore = 1, hasSideEffects = 0,
985 Constraints = "$rbx_save = $dst" in {
986 def LCMPXCHG16B_SAVE_RBX :
987 I<0, Pseudo, (outs GR64:$dst),
988 (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save), "", []>;
991 // Pseudo instruction that doesn't read/write RBX. Will be turned into either
992 // LCMPXCHG16B_SAVE_RBX or LCMPXCHG16B via a custom inserter.
993 let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RCX, RDX],
994 Predicates = [HasCX16,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
995 isCodeGenOnly = 1, isPseudo = 1,
996 mayLoad = 1, mayStore = 1, hasSideEffects = 0,
997 usesCustomInserter = 1 in {
998 def LCMPXCHG16B_NO_RBX :
999 I<0, Pseudo, (outs), (ins i128mem:$ptr, GR64:$rbx_input), "",
1000 [(X86cas16 addr:$ptr, GR64:$rbx_input)]>;
1003 // This pseudo must be used when the frame uses RBX/EBX as
1004 // the base pointer.
1005 // cf comment for LCMPXCHG16B_SAVE_RBX.
1006 let Defs = [EBX], Uses = [ECX, EAX],
1007 Predicates = [HasMWAITX], SchedRW = [WriteSystem],
1008 isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst" in {
1009 def MWAITX_SAVE_RBX :
1010 I<0, Pseudo, (outs GR64:$dst),
1011 (ins GR32:$ebx_input, GR64:$rbx_save),
1016 // Pseudo mwaitx instruction to use for custom insertion.
1017 let Predicates = [HasMWAITX], SchedRW = [WriteSystem],
1018 isCodeGenOnly = 1, isPseudo = 1,
1019 usesCustomInserter = 1 in {
1021 I<0, Pseudo, (outs), (ins GR32:$ecx, GR32:$eax, GR32:$ebx),
1023 [(int_x86_mwaitx GR32:$ecx, GR32:$eax, GR32:$ebx)]>;
1027 defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg", X86cas>;
1029 // Atomic exchange and add
1030 multiclass ATOMIC_RMW_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
1032 let Constraints = "$val = $dst", Defs = [EFLAGS], mayLoad = 1, mayStore = 1,
1033 isCodeGenOnly = 1, SchedRW = [WriteALURMW] in {
1034 def NAME#8 : I<opc8, MRMSrcMem, (outs GR8:$dst),
1035 (ins GR8:$val, i8mem:$ptr),
1036 !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
1038 (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
1039 def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
1040 (ins GR16:$val, i16mem:$ptr),
1041 !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
1044 (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
1046 def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
1047 (ins GR32:$val, i32mem:$ptr),
1048 !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
1051 (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
1053 def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
1054 (ins GR64:$val, i64mem:$ptr),
1055 !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
1058 (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
1062 defm LXADD : ATOMIC_RMW_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add">, TB, LOCK;
1064 /* The following multiclass tries to make sure that in code like
1065 * x.store (immediate op x.load(acquire), release), or
1067 * x.store (register op x.load(acquire), release)
1068 * an operation directly on memory is generated instead of wasting a register.
1069 * It is not automatic as atomic_store/load are only lowered to MOV instructions
1070 * extremely late to prevent them from being accidentally reordered in the backend
1071 * (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions). */
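// For illustration, given C++ source such as
//     x.store(x.load(std::memory_order_acquire) + 1, std::memory_order_release);
// these patterns allow emitting "addl $1, (mem)" directly instead of a separate
// load, register add, and store.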
1073 multiclass RELEASE_BINOP_MI<string Name, SDNode op> {
1074 def : Pat<(atomic_store_8 (op (atomic_load_8 addr:$dst), (i8 imm:$src)),
1076 (!cast<Instruction>(Name#"8mi") addr:$dst, imm:$src)>;
1077 def : Pat<(atomic_store_16 (op (atomic_load_16 addr:$dst), (i16 imm:$src)),
1079 (!cast<Instruction>(Name#"16mi") addr:$dst, imm:$src)>;
1080 def : Pat<(atomic_store_32 (op (atomic_load_32 addr:$dst), (i32 imm:$src)),
1082 (!cast<Instruction>(Name#"32mi") addr:$dst, imm:$src)>;
1083 def : Pat<(atomic_store_64 (op (atomic_load_64 addr:$dst), (i64immSExt32:$src)),
1085 (!cast<Instruction>(Name#"64mi32") addr:$dst, (i64immSExt32:$src))>;
1086 def : Pat<(atomic_store_8 (op (atomic_load_8 addr:$dst), (i8 GR8:$src)), addr:$dst),
1087 (!cast<Instruction>(Name#"8mr") addr:$dst, GR8:$src)>;
1088 def : Pat<(atomic_store_16 (op (atomic_load_16 addr:$dst), (i16 GR16:$src)),
1090 (!cast<Instruction>(Name#"16mr") addr:$dst, GR16:$src)>;
1091 def : Pat<(atomic_store_32 (op (atomic_load_32 addr:$dst), (i32 GR32:$src)),
1093 (!cast<Instruction>(Name#"32mr") addr:$dst, GR32:$src)>;
1094 def : Pat<(atomic_store_64 (op (atomic_load_64 addr:$dst), (i64 GR64:$src)),
1096 (!cast<Instruction>(Name#"64mr") addr:$dst, GR64:$src)>;
1098 defm : RELEASE_BINOP_MI<"ADD", add>;
1099 defm : RELEASE_BINOP_MI<"AND", and>;
1100 defm : RELEASE_BINOP_MI<"OR", or>;
1101 defm : RELEASE_BINOP_MI<"XOR", xor>;
1102 defm : RELEASE_BINOP_MI<"SUB", sub>;
1104 // Atomic load + floating point patterns.
1105 // FIXME: This could also handle SIMD operations with *ps and *pd instructions.
1106 multiclass ATOMIC_LOAD_FP_BINOP_MI<string Name, SDNode op> {
1107 def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
1108 (!cast<Instruction>(Name#"SSrm") FR32:$src1, addr:$src2)>,
1109 Requires<[UseSSE1]>;
1110 def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
1111 (!cast<Instruction>("V"#Name#"SSrm") FR32:$src1, addr:$src2)>,
1113 def : Pat<(op FR32X:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
1114 (!cast<Instruction>("V"#Name#"SSZrm") FR32X:$src1, addr:$src2)>,
1115 Requires<[HasAVX512]>;
1117 def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
1118 (!cast<Instruction>(Name#"SDrm") FR64:$src1, addr:$src2)>,
1119                                   Requires<[UseSSE2]>;
1120 def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
1121 (!cast<Instruction>("V"#Name#"SDrm") FR64:$src1, addr:$src2)>,
1123 def : Pat<(op FR64X:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
1124 (!cast<Instruction>("V"#Name#"SDZrm") FR64X:$src1, addr:$src2)>,
1125 Requires<[HasAVX512]>;
1127 defm : ATOMIC_LOAD_FP_BINOP_MI<"ADD", fadd>;
1128 // FIXME: Add fsub, fmul, fdiv, ...
1130 multiclass RELEASE_UNOP<string Name, dag dag8, dag dag16, dag dag32,
1132 def : Pat<(atomic_store_8 dag8, addr:$dst),
1133 (!cast<Instruction>(Name#8m) addr:$dst)>;
1134 def : Pat<(atomic_store_16 dag16, addr:$dst),
1135 (!cast<Instruction>(Name#16m) addr:$dst)>;
1136 def : Pat<(atomic_store_32 dag32, addr:$dst),
1137 (!cast<Instruction>(Name#32m) addr:$dst)>;
1138 def : Pat<(atomic_store_64 dag64, addr:$dst),
1139 (!cast<Instruction>(Name#64m) addr:$dst)>;
1142 let Predicates = [UseIncDec] in {
1143 defm : RELEASE_UNOP<"INC",
1144 (add (atomic_load_8 addr:$dst), (i8 1)),
1145 (add (atomic_load_16 addr:$dst), (i16 1)),
1146 (add (atomic_load_32 addr:$dst), (i32 1)),
1147 (add (atomic_load_64 addr:$dst), (i64 1))>;
1148 defm : RELEASE_UNOP<"DEC",
1149 (add (atomic_load_8 addr:$dst), (i8 -1)),
1150 (add (atomic_load_16 addr:$dst), (i16 -1)),
1151 (add (atomic_load_32 addr:$dst), (i32 -1)),
1152 (add (atomic_load_64 addr:$dst), (i64 -1))>;
1155 defm : RELEASE_UNOP<"NEG",
1156 (ineg (i8 (atomic_load_8 addr:$dst))),
1157 (ineg (i16 (atomic_load_16 addr:$dst))),
1158 (ineg (i32 (atomic_load_32 addr:$dst))),
1159 (ineg (i64 (atomic_load_64 addr:$dst)))>;
1160 defm : RELEASE_UNOP<"NOT",
1161 (not (i8 (atomic_load_8 addr:$dst))),
1162 (not (i16 (atomic_load_16 addr:$dst))),
1163 (not (i32 (atomic_load_32 addr:$dst))),
1164 (not (i64 (atomic_load_64 addr:$dst)))>;
1166 def : Pat<(atomic_store_8 (i8 imm:$src), addr:$dst),
1167 (MOV8mi addr:$dst, imm:$src)>;
1168 def : Pat<(atomic_store_16 (i16 imm:$src), addr:$dst),
1169 (MOV16mi addr:$dst, imm:$src)>;
1170 def : Pat<(atomic_store_32 (i32 imm:$src), addr:$dst),
1171 (MOV32mi addr:$dst, imm:$src)>;
1172 def : Pat<(atomic_store_64 (i64immSExt32:$src), addr:$dst),
1173 (MOV64mi32 addr:$dst, i64immSExt32:$src)>;
1175 def : Pat<(atomic_store_8 GR8:$src, addr:$dst),
1176 (MOV8mr addr:$dst, GR8:$src)>;
1177 def : Pat<(atomic_store_16 GR16:$src, addr:$dst),
1178 (MOV16mr addr:$dst, GR16:$src)>;
1179 def : Pat<(atomic_store_32 GR32:$src, addr:$dst),
1180 (MOV32mr addr:$dst, GR32:$src)>;
1181 def : Pat<(atomic_store_64 GR64:$src, addr:$dst),
1182 (MOV64mr addr:$dst, GR64:$src)>;
1184 def : Pat<(i8 (atomic_load_8 addr:$src)), (MOV8rm addr:$src)>;
1185 def : Pat<(i16 (atomic_load_16 addr:$src)), (MOV16rm addr:$src)>;
1186 def : Pat<(i32 (atomic_load_32 addr:$src)), (MOV32rm addr:$src)>;
1187 def : Pat<(i64 (atomic_load_64 addr:$src)), (MOV64rm addr:$src)>;
1189 // Floating point loads/stores.
1190 def : Pat<(atomic_store_32 (i32 (bitconvert (f32 FR32:$src))), addr:$dst),
1191 (MOVSSmr addr:$dst, FR32:$src)>, Requires<[UseSSE1]>;
1192 def : Pat<(atomic_store_32 (i32 (bitconvert (f32 FR32:$src))), addr:$dst),
1193 (VMOVSSmr addr:$dst, FR32:$src)>, Requires<[UseAVX]>;
1194 def : Pat<(atomic_store_32 (i32 (bitconvert (f32 FR32:$src))), addr:$dst),
1195 (VMOVSSZmr addr:$dst, FR32:$src)>, Requires<[HasAVX512]>;
1197 def : Pat<(atomic_store_64 (i64 (bitconvert (f64 FR64:$src))), addr:$dst),
1198 (MOVSDmr addr:$dst, FR64:$src)>, Requires<[UseSSE2]>;
1199 def : Pat<(atomic_store_64 (i64 (bitconvert (f64 FR64:$src))), addr:$dst),
1200 (VMOVSDmr addr:$dst, FR64:$src)>, Requires<[UseAVX]>;
1201 def : Pat<(atomic_store_64 (i64 (bitconvert (f64 FR64:$src))), addr:$dst),
1202 (VMOVSDmr addr:$dst, FR64:$src)>, Requires<[HasAVX512]>;
1204 def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
1205 (MOVSSrm_alt addr:$src)>, Requires<[UseSSE1]>;
1206 def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
1207 (VMOVSSrm_alt addr:$src)>, Requires<[UseAVX]>;
1208 def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
1209 (VMOVSSZrm_alt addr:$src)>, Requires<[HasAVX512]>;
1211 def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
1212 (MOVSDrm_alt addr:$src)>, Requires<[UseSSE2]>;
1213 def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
1214 (VMOVSDrm_alt addr:$src)>, Requires<[UseAVX]>;
1215 def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
1216 (VMOVSDZrm_alt addr:$src)>, Requires<[HasAVX512]>;
1218 //===----------------------------------------------------------------------===//
1219 // DAG Pattern Matching Rules
1220 //===----------------------------------------------------------------------===//
1222 // Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
1223 // binary size compared to a regular MOV, but it introduces an unnecessary
1224 // load, so is not suitable for regular or optsize functions.
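// For example (illustrative): a zero store can be emitted as "andl $0, (mem)"
// and a -1 store as "orl $-1, (mem)" instead of the corresponding movl with a
// full 32-bit immediate.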
1225 let Predicates = [OptForMinSize] in {
1226 def : Pat<(simple_store (i16 0), addr:$dst), (AND16mi addr:$dst, 0)>;
1227 def : Pat<(simple_store (i32 0), addr:$dst), (AND32mi addr:$dst, 0)>;
1228 def : Pat<(simple_store (i64 0), addr:$dst), (AND64mi32 addr:$dst, 0)>;
1229 def : Pat<(simple_store (i16 -1), addr:$dst), (OR16mi addr:$dst, -1)>;
1230 def : Pat<(simple_store (i32 -1), addr:$dst), (OR32mi addr:$dst, -1)>;
1231 def : Pat<(simple_store (i64 -1), addr:$dst), (OR64mi32 addr:$dst, -1)>;
1234 // In the kernel code model, we can get the address of a label
1235 // into a register with 'movq'. FIXME: This is a hack; the 'imm' predicate of
1236 // the MOV64ri32 should accept these.
1237 def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
1238 (MOV64ri32 tconstpool :$dst)>, Requires<[KernelCode]>;
1239 def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
1240 (MOV64ri32 tjumptable :$dst)>, Requires<[KernelCode]>;
1241 def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
1242 (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
1243 def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
1244 (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
1245 def : Pat<(i64 (X86Wrapper mcsym:$dst)),
1246 (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
1247 def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
1248 (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;
1250 // With the small code model and -static mode, it is safe to store global addresses
1251 // directly as immediates. FIXME: This is really a hack, the 'imm' predicate
1252 // for MOV64mi32 should handle this sort of thing.
1253 def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
1254 (MOV64mi32 addr:$dst, tconstpool:$src)>,
1255 Requires<[NearData, IsNotPIC]>;
1256 def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
1257 (MOV64mi32 addr:$dst, tjumptable:$src)>,
1258 Requires<[NearData, IsNotPIC]>;
1259 def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
1260 (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
1261 Requires<[NearData, IsNotPIC]>;
1262 def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
1263 (MOV64mi32 addr:$dst, texternalsym:$src)>,
1264 Requires<[NearData, IsNotPIC]>;
1265 def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
1266 (MOV64mi32 addr:$dst, mcsym:$src)>,
1267 Requires<[NearData, IsNotPIC]>;
1268 def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
1269 (MOV64mi32 addr:$dst, tblockaddress:$src)>,
1270 Requires<[NearData, IsNotPIC]>;
1272 def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
1273 def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;
1277 // tls has some funny stuff here...
1278 // This corresponds to movabs $foo@tpoff, %rax
1279 def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
1280 (MOV64ri32 tglobaltlsaddr :$dst)>;
1281 // This corresponds to add $foo@tpoff, %rax
1282 def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
1283 (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
1286 // Direct PC relative function call for small code model. 32-bit displacement
1287 // sign extended to 64-bit.
1288 def : Pat<(X86call (i64 tglobaladdr:$dst)),
1289 (CALL64pcrel32 tglobaladdr:$dst)>;
1290 def : Pat<(X86call (i64 texternalsym:$dst)),
1291 (CALL64pcrel32 texternalsym:$dst)>;
1293 def : Pat<(X86call_rvmarker (i64 tglobaladdr:$rvfunc), (i64 texternalsym:$dst)),
1294 (CALL64pcrel32_RVMARKER tglobaladdr:$rvfunc, texternalsym:$dst)>;
1295 def : Pat<(X86call_rvmarker (i64 tglobaladdr:$rvfunc), (i64 tglobaladdr:$dst)),
1296 (CALL64pcrel32_RVMARKER tglobaladdr:$rvfunc, tglobaladdr:$dst)>;
1299 // Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
1300 // can never use callee-saved registers. That is the purpose of the GR64_TC
1301 // register classes.
1303 // The only volatile register that is never used by the calling convention is
1304 // %r11. This happens when calling a vararg function with 6 arguments.
1306 // Match an X86tcret that uses less than 7 volatile registers.
1307 def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
1308 (X86tcret node:$ptr, node:$off), [{
1309 // X86tcret args: (*chain, ptr, imm, regs..., glue)
1310 unsigned NumRegs = 0;
1311 for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
1312 if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
1317 def X86tcret_1reg : PatFrag<(ops node:$ptr, node:$off),
1318 (X86tcret node:$ptr, node:$off), [{
1319 // X86tcret args: (*chain, ptr, imm, regs..., glue)
1320 unsigned NumRegs = 1;
1321 const SDValue& BasePtr = cast<LoadSDNode>(N->getOperand(1))->getBasePtr();
1322 if (isa<FrameIndexSDNode>(BasePtr))
1324 else if (BasePtr->getNumOperands() && isa<GlobalAddressSDNode>(BasePtr->getOperand(0)))
1326 for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
1327 if (isa<RegisterSDNode>(N->getOperand(i)) && ( NumRegs-- == 0))
1332 def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
1333 (TCRETURNri ptr_rc_tailcall:$dst, timm:$off)>,
1334 Requires<[Not64BitMode, NotUseIndirectThunkCalls]>;
1336 // FIXME: This is disabled for 32-bit PIC mode because the global base
1337 // register which is part of the address mode may be assigned a
1338 // callee-saved register.
1339 // Similar to X86tcret_6regs, here we only have 1 register left
1340 def : Pat<(X86tcret_1reg (load addr:$dst), timm:$off),
1341 (TCRETURNmi addr:$dst, timm:$off)>,
1342 Requires<[Not64BitMode, IsNotPIC, NotUseIndirectThunkCalls]>;
1344 def : Pat<(X86tcret (i32 tglobaladdr:$dst), timm:$off),
1345 (TCRETURNdi tglobaladdr:$dst, timm:$off)>,
1346 Requires<[NotLP64]>;
1348 def : Pat<(X86tcret (i32 texternalsym:$dst), timm:$off),
1349 (TCRETURNdi texternalsym:$dst, timm:$off)>,
1350 Requires<[NotLP64]>;
1352 def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
1353 (TCRETURNri64 ptr_rc_tailcall:$dst, timm:$off)>,
1354 Requires<[In64BitMode, NotUseIndirectThunkCalls]>;
1356 // Don't fold loads into X86tcret requiring more than 6 regs.
1357 // There wouldn't be enough scratch registers for base+index.
1358 def : Pat<(X86tcret_6regs (load addr:$dst), timm:$off),
1359 (TCRETURNmi64 addr:$dst, timm:$off)>,
1360 Requires<[In64BitMode, NotUseIndirectThunkCalls]>;
1362 def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
1363 (INDIRECT_THUNK_TCRETURN64 ptr_rc_tailcall:$dst, timm:$off)>,
1364 Requires<[In64BitMode, UseIndirectThunkCalls]>;
1366 def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
1367 (INDIRECT_THUNK_TCRETURN32 ptr_rc_tailcall:$dst, timm:$off)>,
1368 Requires<[Not64BitMode, UseIndirectThunkCalls]>;
1370 def : Pat<(X86tcret (i64 tglobaladdr:$dst), timm:$off),
1371           (TCRETURNdi64 tglobaladdr:$dst, timm:$off)>,
1372           Requires<[In64BitMode]>;
1374 def : Pat<(X86tcret (i64 texternalsym:$dst), timm:$off),
1375           (TCRETURNdi64 texternalsym:$dst, timm:$off)>,
1376           Requires<[In64BitMode]>;
1378 // Normal calls, with various flavors of addresses.
1379 def : Pat<(X86call (i32 tglobaladdr:$dst)),
1380 (CALLpcrel32 tglobaladdr:$dst)>;
1381 def : Pat<(X86call (i32 texternalsym:$dst)),
1382 (CALLpcrel32 texternalsym:$dst)>;
1383 def : Pat<(X86call (i32 imm:$dst)),
1384 (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;
1388 // TEST R,R is smaller than CMP R,0
1389 def : Pat<(X86cmp GR8:$src1, 0),
1390 (TEST8rr GR8:$src1, GR8:$src1)>;
1391 def : Pat<(X86cmp GR16:$src1, 0),
1392 (TEST16rr GR16:$src1, GR16:$src1)>;
1393 def : Pat<(X86cmp GR32:$src1, 0),
1394 (TEST32rr GR32:$src1, GR32:$src1)>;
1395 def : Pat<(X86cmp GR64:$src1, 0),
1396 (TEST64rr GR64:$src1, GR64:$src1)>;
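// For example, "testl %eax, %eax" encodes in 2 bytes while "cmpl $0, %eax"
// needs 3, and both produce the same EFLAGS for a compare against zero.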
1398 // zextload bool -> zextload byte
1399 // An i1 is stored in one byte in zero-extended form; the upper bits are
1400 // expected to have been cleared before the store.
1401 def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
1402 def : Pat<(zextloadi16i1 addr:$src),
1403 (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
1404 def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
1405 def : Pat<(zextloadi64i1 addr:$src),
1406 (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
1408 // extload bool -> extload byte
1409 // When extloading from 16-bit and smaller memory locations into 64-bit
1410 // registers, use zero-extending loads so that the entire 64-bit register is
1411 // defined, avoiding partial-register updates.
1413 def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
1414 def : Pat<(extloadi16i1 addr:$src),
1415 (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
1416 def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
1417 def : Pat<(extloadi16i8 addr:$src),
1418 (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
1419 def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
1420 def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
1422 // For other extloads, use subregs, since the high contents of the register are
1423 // defined after an extload.
1424 // NOTE: The extloadi64i32 pattern needs to be first as it will try to form
1425 // 32-bit loads for 4 byte aligned i8/i16 loads.
1426 def : Pat<(extloadi64i32 addr:$src),
1427 (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
1428 def : Pat<(extloadi64i1 addr:$src),
1429 (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
1430 def : Pat<(extloadi64i8 addr:$src),
1431 (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
1432 def : Pat<(extloadi64i16 addr:$src),
1433 (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
1435 // anyext. Define these to do an explicit zero-extend to
1436 // avoid partial-register updates.
1437 def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
1438 (MOVZX32rr8 GR8 :$src), sub_16bit)>;
1439 def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;
1441 // Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.
1442 def : Pat<(i32 (anyext GR16:$src)),
1443 (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;
1445 def : Pat<(i64 (anyext GR8 :$src)),
1446 (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>;
1447 def : Pat<(i64 (anyext GR16:$src)),
1448 (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
1449 def : Pat<(i64 (anyext GR32:$src)),
1450 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;
1452 // If this is an anyext of the remainder of an 8-bit sdivrem, use a MOVSX
1453 // instead of a MOVZX. The sdivrem lowering will emit a MOVSX to move
1454 // %ah to the lower byte of a register. By using a MOVSX here we allow a
1455 // post-isel peephole to merge the two MOVSX instructions into one.
1456 def anyext_sdiv : PatFrag<(ops node:$lhs), (anyext node:$lhs),[{
1457 return (N->getOperand(0).getOpcode() == ISD::SDIVREM &&
1458           N->getOperand(0).getResNo() == 1);
1459 }]>;
1460 def : Pat<(i32 (anyext_sdiv GR8:$src)), (MOVSX32rr8 GR8:$src)>;
1462 // Any x86 instruction that produces a 32-bit result implicitly zeroes the high
1463 // half of the 64-bit register, except for the node kinds excluded below.
1464 // Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may be copying from a
1465 // truncate. AssertSext/AssertZext/AssertAlign aren't saying anything about the
1466 // upper 32 bits; they're probably just qualifying a CopyFromReg. FREEZE may be
1467 // coming from a truncate. Any other 32-bit operation will zero-extend up to 64 bits.
1468 def def32 : PatLeaf<(i32 GR32:$src), [{
1469 return N->getOpcode() != ISD::TRUNCATE &&
1470 N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
1471 N->getOpcode() != ISD::CopyFromReg &&
1472 N->getOpcode() != ISD::AssertSext &&
1473 N->getOpcode() != ISD::AssertZext &&
1474 N->getOpcode() != ISD::AssertAlign &&
1475          N->getOpcode() != ISD::FREEZE;
1476 }]>;
1478 // In the case of a 32-bit def that is known to implicitly zero-extend,
1479 // we can use a SUBREG_TO_REG.
1480 def : Pat<(i64 (zext def32:$src)),
1481 (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
1482 def : Pat<(i64 (and (anyext def32:$src), 0x00000000FFFFFFFF)),
1483 (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
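// For example, "movl %edi, %eax" already clears bits 63:32 of %rax, so a
// (zext i32 to i64) of such a value needs no extra instruction; the
// SUBREG_TO_REG patterns above merely re-label the 32-bit result as a 64-bit
// value.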
1485 //===----------------------------------------------------------------------===//
1486 // Pattern match OR as ADD
1487 //===----------------------------------------------------------------------===//
1489 // If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
1490 // 3-addressified into an LEA instruction to avoid copies. However, we also
1491 // want to finally emit these instructions as an or at the end of the code
1492 // generator to make the generated code easier to read. To do this, we select
1493 // into "disjoint bits" pseudo ops.
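// For example, (or (shl x, 1), 1) cannot produce a carry because the operands
// have no bits in common, so it can be selected as one of the ADD*_DB pseudos
// below and later turned into an LEA when the destination must differ from the
// source; otherwise it is emitted as a plain OR at the end of code generation.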
1495 // Treat an 'or' node as an 'add' node if the or'ed bits are known to be zero.
1496 def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
1497 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
1498 return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
1500 KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
1501 KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
1502   return (~Known0.Zero & ~Known1.Zero) == 0;
1503 }]>;
1506 // (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
1507 // Try this before selecting to an OR.
1508 let SchedRW = [WriteALU] in {
1510 let isConvertibleToThreeAddress = 1, isPseudo = 1,
1511 Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
1512 let isCommutable = 1 in {
1513 def ADD8rr_DB : I<0, Pseudo, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
1514 "", // orb/addb REG, REG
1515 [(set GR8:$dst, (or_is_add GR8:$src1, GR8:$src2))]>;
1516 def ADD16rr_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1517 "", // orw/addw REG, REG
1518 [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
1519 def ADD32rr_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1520 "", // orl/addl REG, REG
1521 [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
1522 def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1523 "", // orq/addq REG, REG
1524                     [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
1525 } // isCommutable
1527 def ADD8ri_DB : I<0, Pseudo,
1528 (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
1529 "", // orb/addb REG, imm8
1530 [(set GR8:$dst, (or_is_add GR8:$src1, imm:$src2))]>;
1531 def ADD16ri_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
1532 "", // orw/addw REG, imm
1533 [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;
1534 def ADD32ri_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
1535 "", // orl/addl REG, imm
1536 [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;
1537 def ADD64ri32_DB : I<0, Pseudo,
1538 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
1539 "", // orq/addq REG, imm
1540 [(set GR64:$dst, (or_is_add GR64:$src1,
1541                                                  i64immSExt32:$src2))]>;
1542 } // isConvertibleToThreeAddress
1543 } // AddedComplexity, SchedRW
1545 //===----------------------------------------------------------------------===//
1546 // Pattern match XOR as ADD
1547 //===----------------------------------------------------------------------===//
1549 // Prefer to pattern match XOR with min_signed_value as ADD at isel time.
1550 // ADD can be 3-addressified into an LEA instruction to avoid copies.
1551 let AddedComplexity = 5 in {
1552 def : Pat<(xor GR8:$src1, -128),
1553 (ADD8ri GR8:$src1, -128)>;
1554 def : Pat<(xor GR16:$src1, -32768),
1555 (ADD16ri GR16:$src1, -32768)>;
1556 def : Pat<(xor GR32:$src1, -2147483648),
1557           (ADD32ri GR32:$src1, -2147483648)>;
1558 }
1560 //===----------------------------------------------------------------------===//
1561 // Some peepholes.
1562 //===----------------------------------------------------------------------===//
1564 // Odd encoding trick: -128 fits into an 8-bit immediate field while
1565 // +128 doesn't, so in this special case use a sub instead of an add.
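// For example, "addl $128, %eax" needs a 32-bit immediate (5 bytes), while the
// equivalent "subl $-128, %eax" fits the sign-extended imm8 form (3 bytes).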
1566 def : Pat<(add GR16:$src1, 128),
1567 (SUB16ri GR16:$src1, -128)>;
1568 def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
1569 (SUB16mi addr:$dst, -128)>;
1571 def : Pat<(add GR32:$src1, 128),
1572 (SUB32ri GR32:$src1, -128)>;
1573 def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
1574 (SUB32mi addr:$dst, -128)>;
1576 def : Pat<(add GR64:$src1, 128),
1577 (SUB64ri32 GR64:$src1, -128)>;
1578 def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
1579 (SUB64mi32 addr:$dst, -128)>;
1581 def : Pat<(X86add_flag_nocf GR16:$src1, 128),
1582 (SUB16ri GR16:$src1, -128)>;
1583 def : Pat<(X86add_flag_nocf GR32:$src1, 128),
1584 (SUB32ri GR32:$src1, -128)>;
1585 def : Pat<(X86add_flag_nocf GR64:$src1, 128),
1586 (SUB64ri32 GR64:$src1, -128)>;
1588 // The same trick applies for 32-bit immediate fields in 64-bit
1589 // operations.
1590 def : Pat<(add GR64:$src1, 0x0000000080000000),
1591 (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
1592 def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
1593 (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
1594 def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
1595 (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
1597 // To avoid needing to materialize an immediate in a register, use a 32-bit and
1598 // with implicit zero-extension instead of a 64-bit and if the immediate has at
1599 // least 32 bits of leading zeros. If in addition the last 32 bits can be
1600 // represented with a sign extension of an 8-bit constant, use that.
1601 // This can also reduce instruction size by eliminating the need for the REX
1602 // prefix.
1604 // AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
1605 let AddedComplexity = 1 in {
1606 def : Pat<(and GR64:$src, i64immZExt32:$imm),
1607           (SUBREG_TO_REG
1608             (i64 0),
1609             (AND32ri
1610               (EXTRACT_SUBREG GR64:$src, sub_32bit),
1611               (i32 (GetLo32XForm imm:$imm))),
1612             sub_32bit)>;
1613 } // AddedComplexity = 1
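// For example, a mask like 0x0000000080000001 is not sign-extendable from 32
// bits, but "andl $0x80000001, %eax" performs the AND and implicitly clears
// bits 63:32, which matches the 64-bit result because the mask's upper half is
// zero.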
1616 // AddedComplexity is needed due to the increased complexity on the
1617 // i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
1618 // the MOVZX patterns keeps them together in the DAGISel tables.
1619 let AddedComplexity = 1 in {
1620 // r & (2^16-1) ==> movz
1621 def : Pat<(and GR32:$src1, 0xffff),
1622 (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
1623 // r & (2^8-1) ==> movz
1624 def : Pat<(and GR32:$src1, 0xff),
1625 (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>;
1626 // r & (2^8-1) ==> movz
1627 def : Pat<(and GR16:$src1, 0xff),
1628       (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)),
1629                       sub_16bit)>;
1631 // r & (2^32-1) ==> movz
1632 def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
1633 (SUBREG_TO_REG (i64 0),
1634                (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
1635                sub_32bit)>;
1636 // r & (2^16-1) ==> movz
1637 def : Pat<(and GR64:$src, 0xffff),
1638 (SUBREG_TO_REG (i64 0),
1639                (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
1640                sub_32bit)>;
1641 // r & (2^8-1) ==> movz
1642 def : Pat<(and GR64:$src, 0xff),
1643 (SUBREG_TO_REG (i64 0),
1644                (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
1645                sub_32bit)>;
1646 } // AddedComplexity = 1
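// Compared with an explicit AND of the mask, the MOVZX forms above avoid
// encoding a 32-bit immediate and do not clobber EFLAGS.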
1649 // Try to use BTS/BTR/BTC for single bit operations on the upper 32-bits.
1651 def BTRXForm : SDNodeXForm<imm, [{
1652 // Transformation function: Find the lowest 0.
1653   return getI64Imm((uint8_t)N->getAPIntValue().countr_one(), SDLoc(N));
1654 }]>;
1656 def BTCBTSXForm : SDNodeXForm<imm, [{
1657 // Transformation function: Find the lowest 1.
1658   return getI64Imm((uint8_t)N->getAPIntValue().countr_zero(), SDLoc(N));
1659 }]>;
1661 def BTRMask64 : ImmLeaf<i64, [{
1662   return !isUInt<32>(Imm) && !isInt<32>(Imm) && isPowerOf2_64(~Imm);
1663 }]>;
1665 def BTCBTSMask64 : ImmLeaf<i64, [{
1666   return !isInt<32>(Imm) && isPowerOf2_64(Imm);
1667 }]>;
1669 // For now only do this for optsize.
1670 let AddedComplexity = 1, Predicates=[OptForSize] in {
1671 def : Pat<(and GR64:$src1, BTRMask64:$mask),
1672 (BTR64ri8 GR64:$src1, (BTRXForm imm:$mask))>;
1673 def : Pat<(or GR64:$src1, BTCBTSMask64:$mask),
1674 (BTS64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
1675 def : Pat<(xor GR64:$src1, BTCBTSMask64:$mask),
1676           (BTC64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
1677 }
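// For example, clearing bit 40: the mask ~(1ULL << 40) cannot be encoded as a
// sign-extended 32-bit immediate, so "btrq $40, %rax" (5 bytes) replaces a
// 10-byte movabsq of the mask followed by an andq.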
1680 // sext_inreg patterns
1681 def : Pat<(sext_inreg GR32:$src, i16),
1682 (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
1683 def : Pat<(sext_inreg GR32:$src, i8),
1684 (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>;
1686 def : Pat<(sext_inreg GR16:$src, i8),
1687           (EXTRACT_SUBREG (MOVSX32rr8 (EXTRACT_SUBREG GR16:$src, sub_8bit)),
1688                           sub_16bit)>;
1690 def : Pat<(sext_inreg GR64:$src, i32),
1691 (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
1692 def : Pat<(sext_inreg GR64:$src, i16),
1693 (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
1694 def : Pat<(sext_inreg GR64:$src, i8),
1695 (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
1697 // sext, sext_load, zext, zext_load
1698 def: Pat<(i16 (sext GR8:$src)),
1699 (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
1700 def: Pat<(sextloadi16i8 addr:$src),
1701 (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
1702 def: Pat<(i16 (zext GR8:$src)),
1703 (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
1704 def: Pat<(zextloadi16i8 addr:$src),
1705 (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
1707 // trunc patterns
1708 def : Pat<(i16 (trunc GR32:$src)),
1709 (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
1710 def : Pat<(i8 (trunc GR32:$src)),
1711           (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
1712                           sub_8bit)>,
1713           Requires<[Not64BitMode]>;
1714 def : Pat<(i8 (trunc GR16:$src)),
1715           (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
1716                           sub_8bit)>,
1717           Requires<[Not64BitMode]>;
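// Outside 64-bit mode only %eax/%ebx/%ecx/%edx have byte subregisters, hence
// the COPY_TO_REGCLASS into the ABCD classes above; in 64-bit mode every GPR
// has a low-byte subregister, so the simpler patterns below suffice.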
1718 def : Pat<(i32 (trunc GR64:$src)),
1719 (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
1720 def : Pat<(i16 (trunc GR64:$src)),
1721 (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
1722 def : Pat<(i8 (trunc GR64:$src)),
1723 (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
1724 def : Pat<(i8 (trunc GR32:$src)),
1725 (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
1726 Requires<[In64BitMode]>;
1727 def : Pat<(i8 (trunc GR16:$src)),
1728 (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
1729 Requires<[In64BitMode]>;
1731 def immff00_ffff : ImmLeaf<i32, [{
1732   return Imm >= 0xff00 && Imm <= 0xffff;
1733 }]>;
1735 // h-register tricks
1736 def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
1737 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
1738 Requires<[Not64BitMode]>;
1739 def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
1740 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
1741 Requires<[Not64BitMode]>;
1742 def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
1743 (EXTRACT_SUBREG GR32:$src, sub_8bit_hi)>,
1744 Requires<[Not64BitMode]>;
1745 def : Pat<(srl GR16:$src, (i8 8)),
1746           (EXTRACT_SUBREG
1747             (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
1748             sub_16bit)>;
1749 def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
1750 (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
1751 def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
1752 (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
1753 def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
1754 (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
1755 def : Pat<(srl (and_su GR32:$src, immff00_ffff), (i8 8)),
1756 (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
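// For example, extracting the second byte, (x >> 8) & 0xff, becomes a single
// "movzbl %ah, %ecx"-style instruction instead of a shift followed by a mask.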
1758 // h-register tricks.
1759 // For now, be conservative on x86-64 and use an h-register extract only if the
1760 // value is immediately zero-extended or stored, which are somewhat common
1761 // cases. This uses a bunch of code to prevent a register requiring a REX prefix
1762 // from being allocated in the same instruction as the h register, as there's
1763 // currently no way to describe this requirement to the register allocator.
1765 // h-register extract and zero-extend.
1766 def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
1767           (SUBREG_TO_REG
1768             (i64 0),
1769             (MOVZX32rr8_NOREX
1770               (EXTRACT_SUBREG GR64:$src, sub_8bit_hi)),
1771             sub_32bit)>;
1772 def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
1773           (SUBREG_TO_REG
1774             (i64 0),
1775             (MOVZX32rr8_NOREX
1776               (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
1777             sub_32bit)>;
1778 def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
1779           (SUBREG_TO_REG
1780             (i64 0),
1781             (MOVZX32rr8_NOREX
1782               (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
1783             sub_32bit)>;
1785 // h-register extract and store.
1786 def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
1787           (MOV8mr_NOREX
1788             addr:$dst,
1789             (EXTRACT_SUBREG GR64:$src, sub_8bit_hi))>;
1790 def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
1791           (MOV8mr_NOREX
1792             addr:$dst,
1793             (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>,
1794 Requires<[In64BitMode]>;
1795 def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
1796           (MOV8mr_NOREX
1797             addr:$dst,
1798             (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>,
1799 Requires<[In64BitMode]>;
1801 // Special pattern to catch the last step of __builtin_parity handling. Our
1802 // goal is to use an xor of an h-register with the corresponding l-register.
1803 // The above patterns would handle this on non 64-bit targets, but for 64-bit
1804 // we need to be more careful. We're using a NOREX instruction here in case
1805 // register allocation fails to keep the two registers together. So we need to
1806 // make sure we can't accidentally mix R8-R15 with an h-register.
1807 def : Pat<(X86xor_flag (i8 (trunc GR32:$src)),
1808 (i8 (trunc (srl_su GR32:$src, (i8 8))))),
1809 (XOR8rr_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit),
1810 (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
1812 // (shl x, 1) ==> (add x, x)
1813 // Note that if x is undef (immediate or otherwise), we could theoretically
1814 // end up with the two uses of x getting different values, producing a result
1815 // where the least significant bit is not 0. However, the probability of this
1816 // happening is considered low enough that this is officially not a
1817 // "real problem".
1818 def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>;
1819 def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
1820 def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
1821 def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
1823 def shiftMask8 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
1824   return isUnneededShiftMask(N, 3);
1825 }]>;
1827 def shiftMask16 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
1828   return isUnneededShiftMask(N, 4);
1829 }]>;
1831 def shiftMask32 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
1832   return isUnneededShiftMask(N, 5);
1833 }]>;
1835 def shiftMask64 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
1836   return isUnneededShiftMask(N, 6);
1837 }]>;
1840 // Shift amount is implicitly masked.
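// x86 shifts by CL mask the count to 5 bits for 8/16/32-bit operands and to
// 6 bits for 64-bit operands, so an explicit (and y, 31) or (and y, 63) on the
// amount is redundant and can be dropped.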
1841 multiclass MaskedShiftAmountPats<SDNode frag, string name> {
1842 // (shift x (and y, 31)) ==> (shift x, y)
1843 def : Pat<(frag GR8:$src1, (shiftMask32 CL)),
1844 (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
1845 def : Pat<(frag GR16:$src1, (shiftMask32 CL)),
1846 (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
1847 def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
1848 (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
1849 def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask32 CL)), addr:$dst),
1850 (!cast<Instruction>(name # "8mCL") addr:$dst)>;
1851 def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask32 CL)), addr:$dst),
1852 (!cast<Instruction>(name # "16mCL") addr:$dst)>;
1853 def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
1854 (!cast<Instruction>(name # "32mCL") addr:$dst)>;
1856 // (shift x (and y, 63)) ==> (shift x, y)
1857 def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
1858 (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
1859 def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
1860             (!cast<Instruction>(name # "64mCL") addr:$dst)>;
1861 }
1863 defm : MaskedShiftAmountPats<shl, "SHL">;
1864 defm : MaskedShiftAmountPats<srl, "SHR">;
1865 defm : MaskedShiftAmountPats<sra, "SAR">;
1867 // ROL/ROR instructions allow a stronger mask optimization than shift for 8- and
1868 // 16-bit. We can remove a mask of any (bitwidth - 1) on the rotation amount
1869 // because over-rotating produces the same result. This is noted in the Intel
1870 // docs with: "tempCOUNT <- (COUNT & COUNTMASK) MOD SIZE". Masking the rotation
1871 // amount could affect EFLAGS results, but that does not matter because we are
1872 // not tracking flags for these nodes.
1873 multiclass MaskedRotateAmountPats<SDNode frag, string name> {
1874 // (rot x (and y, BitWidth - 1)) ==> (rot x, y)
1875 def : Pat<(frag GR8:$src1, (shiftMask8 CL)),
1876 (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
1877 def : Pat<(frag GR16:$src1, (shiftMask16 CL)),
1878 (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
1879 def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
1880 (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
1881 def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask8 CL)), addr:$dst),
1882 (!cast<Instruction>(name # "8mCL") addr:$dst)>;
1883 def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask16 CL)), addr:$dst),
1884 (!cast<Instruction>(name # "16mCL") addr:$dst)>;
1885 def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
1886 (!cast<Instruction>(name # "32mCL") addr:$dst)>;
1888 // (rot x (and y, 63)) ==> (rot x, y)
1889 def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
1890 (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
1891 def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
1892             (!cast<Instruction>(name # "64mCL") addr:$dst)>;
1893 }
1896 defm : MaskedRotateAmountPats<rotl, "ROL">;
1897 defm : MaskedRotateAmountPats<rotr, "ROR">;
1899 // Double "funnel" shift amount is implicitly masked.
1900 // (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y) (NOTE: modulo32)
1901 def : Pat<(X86fshl GR16:$src1, GR16:$src2, (shiftMask32 CL)),
1902 (SHLD16rrCL GR16:$src1, GR16:$src2)>;
1903 def : Pat<(X86fshr GR16:$src2, GR16:$src1, (shiftMask32 CL)),
1904 (SHRD16rrCL GR16:$src1, GR16:$src2)>;
1906 // (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y)
1907 def : Pat<(fshl GR32:$src1, GR32:$src2, (shiftMask32 CL)),
1908 (SHLD32rrCL GR32:$src1, GR32:$src2)>;
1909 def : Pat<(fshr GR32:$src2, GR32:$src1, (shiftMask32 CL)),
1910 (SHRD32rrCL GR32:$src1, GR32:$src2)>;
1912 // (fshl/fshr x (and y, 63)) ==> (fshl/fshr x, y)
1913 def : Pat<(fshl GR64:$src1, GR64:$src2, (shiftMask64 CL)),
1914 (SHLD64rrCL GR64:$src1, GR64:$src2)>;
1915 def : Pat<(fshr GR64:$src2, GR64:$src1, (shiftMask64 CL)),
1916 (SHRD64rrCL GR64:$src1, GR64:$src2)>;
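// In AT&T syntax, "shldl %cl, %ebx, %eax" computes (%eax << cl) | (%ebx >>
// (32 - cl)), which matches fshl; SHRD is the fshr counterpart, hence the
// swapped source operands in the fshr patterns above.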
1918 let Predicates = [HasBMI2] in {
1919 let AddedComplexity = 1 in {
1920 def : Pat<(sra GR32:$src1, (shiftMask32 GR8:$src2)),
1921           (SARX32rr GR32:$src1,
1922                     (INSERT_SUBREG
1923                       (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1924 def : Pat<(sra GR64:$src1, (shiftMask64 GR8:$src2)),
1925           (SARX64rr GR64:$src1,
1926                     (INSERT_SUBREG
1927                       (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1929 def : Pat<(srl GR32:$src1, (shiftMask32 GR8:$src2)),
1930           (SHRX32rr GR32:$src1,
1931                     (INSERT_SUBREG
1932                       (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1933 def : Pat<(srl GR64:$src1, (shiftMask64 GR8:$src2)),
1934           (SHRX64rr GR64:$src1,
1935                     (INSERT_SUBREG
1936                       (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1938 def : Pat<(shl GR32:$src1, (shiftMask32 GR8:$src2)),
1939           (SHLX32rr GR32:$src1,
1940                     (INSERT_SUBREG
1941                       (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1942 def : Pat<(shl GR64:$src1, (shiftMask64 GR8:$src2)),
1943           (SHLX64rr GR64:$src1,
1944                     (INSERT_SUBREG
1945                       (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1946 } // AddedComplexity = 1
1948 def : Pat<(sra (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
1949           (SARX32rm addr:$src1,
1950                     (INSERT_SUBREG
1951                       (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1952 def : Pat<(sra (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
1953           (SARX64rm addr:$src1,
1954                     (INSERT_SUBREG
1955                       (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1957 def : Pat<(srl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
1958           (SHRX32rm addr:$src1,
1959                     (INSERT_SUBREG
1960                       (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1961 def : Pat<(srl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
1962           (SHRX64rm addr:$src1,
1963                     (INSERT_SUBREG
1964                       (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1966 def : Pat<(shl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
1967           (SHLX32rm addr:$src1,
1968                     (INSERT_SUBREG
1969                       (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1970 def : Pat<(shl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
1971           (SHLX64rm addr:$src1,
1972                     (INSERT_SUBREG
1973                       (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1974 } // Predicates = [HasBMI2]
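// BMI2 SARX/SHRX/SHLX take the shift count in an arbitrary register, mask it
// the same way the hardware masks a CL count, and do not write EFLAGS, so the
// explicit mask is dropped and the i8 amount is simply widened into a full
// register for the count operand.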
1976 // Use BTR/BTS/BTC for clearing/setting/toggling a bit in a variable location.
1977 multiclass one_bit_patterns<RegisterClass RC, ValueType VT, Instruction BTR,
1978 Instruction BTS, Instruction BTC,
1979 PatFrag ShiftMask> {
1980   def : Pat<(and RC:$src1, (rotl -2, GR8:$src2)),
1981             (BTR RC:$src1,
1982                  (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1983   def : Pat<(or RC:$src1, (shl 1, GR8:$src2)),
1984             (BTS RC:$src1,
1985                  (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1986   def : Pat<(xor RC:$src1, (shl 1, GR8:$src2)),
1987             (BTC RC:$src1,
1988                  (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1990   // Similar to above, but removing unneeded masking of the shift amount.
1991   def : Pat<(and RC:$src1, (rotl -2, (ShiftMask GR8:$src2))),
1992             (BTR RC:$src1,
1993                  (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1994   def : Pat<(or RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
1995             (BTS RC:$src1,
1996                  (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1997   def : Pat<(xor RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
1998             (BTC RC:$src1,
1999                  (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
2000 }
2002 defm : one_bit_patterns<GR16, i16, BTR16rr, BTS16rr, BTC16rr, shiftMask16>;
2003 defm : one_bit_patterns<GR32, i32, BTR32rr, BTS32rr, BTC32rr, shiftMask32>;
2004 defm : one_bit_patterns<GR64, i64, BTR64rr, BTS64rr, BTC64rr, shiftMask64>;
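// Note that (rotl -2, n) rotates 0b...11111110 left by n and therefore yields
// ~(1 << n), so the AND clears exactly bit n (BTR), while the (shl 1, n) forms
// set or toggle bit n (BTS/BTC).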
2006 //===----------------------------------------------------------------------===//
2007 // EFLAGS-defining Patterns
2008 //===----------------------------------------------------------------------===//
2011 def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr GR8 :$src1, GR8 :$src2)>;
2012 def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
2013 def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
2014 def : Pat<(add GR64:$src1, GR64:$src2), (ADD64rr GR64:$src1, GR64:$src2)>;
2017 def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
2018 (ADD8rm GR8:$src1, addr:$src2)>;
2019 def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
2020 (ADD16rm GR16:$src1, addr:$src2)>;
2021 def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
2022 (ADD32rm GR32:$src1, addr:$src2)>;
2023 def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
2024 (ADD64rm GR64:$src1, addr:$src2)>;
2027 def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri GR8:$src1 , imm:$src2)>;
2028 def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
2029 def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
2030 def : Pat<(add GR64:$src1, i64immSExt32:$src2), (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
2033 def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr GR8 :$src1, GR8 :$src2)>;
2034 def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
2035 def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
2036 def : Pat<(sub GR64:$src1, GR64:$src2), (SUB64rr GR64:$src1, GR64:$src2)>;
2039 def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
2040 (SUB8rm GR8:$src1, addr:$src2)>;
2041 def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
2042 (SUB16rm GR16:$src1, addr:$src2)>;
2043 def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
2044 (SUB32rm GR32:$src1, addr:$src2)>;
2045 def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
2046 (SUB64rm GR64:$src1, addr:$src2)>;
2049 def : Pat<(sub GR8:$src1, imm:$src2),
2050 (SUB8ri GR8:$src1, imm:$src2)>;
2051 def : Pat<(sub GR16:$src1, imm:$src2),
2052 (SUB16ri GR16:$src1, imm:$src2)>;
2053 def : Pat<(sub GR32:$src1, imm:$src2),
2054 (SUB32ri GR32:$src1, imm:$src2)>;
2055 def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
2056 (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
2059 def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r GR8 :$src)>;
2060 def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
2061 def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
2062 def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;
2065 def : Pat<(mul GR16:$src1, GR16:$src2),
2066 (IMUL16rr GR16:$src1, GR16:$src2)>;
2067 def : Pat<(mul GR32:$src1, GR32:$src2),
2068 (IMUL32rr GR32:$src1, GR32:$src2)>;
2069 def : Pat<(mul GR64:$src1, GR64:$src2),
2070 (IMUL64rr GR64:$src1, GR64:$src2)>;
2073 def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
2074 (IMUL16rm GR16:$src1, addr:$src2)>;
2075 def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
2076 (IMUL32rm GR32:$src1, addr:$src2)>;
2077 def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
2078 (IMUL64rm GR64:$src1, addr:$src2)>;
2081 def : Pat<(mul GR16:$src1, imm:$src2),
2082 (IMUL16rri GR16:$src1, imm:$src2)>;
2083 def : Pat<(mul GR32:$src1, imm:$src2),
2084 (IMUL32rri GR32:$src1, imm:$src2)>;
2085 def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
2086 (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
2088 // reg = mul mem, imm
2089 def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
2090 (IMUL16rmi addr:$src1, imm:$src2)>;
2091 def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
2092 (IMUL32rmi addr:$src1, imm:$src2)>;
2093 def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
2094 (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
2096 // Increment/Decrement reg.
2097 // Do not use INC/DEC if it is slow.
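// (INC/DEC leave CF unchanged; on some microarchitectures that partial EFLAGS
// update creates a flag-merge dependency, so such targets prefer ADD/SUB with
// an immediate of +/-1 instead.)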
2098 let Predicates = [UseIncDec] in {
2099 def : Pat<(add GR8:$src, 1), (INC8r GR8:$src)>;
2100 def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>;
2101 def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>;
2102 def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;
2103 def : Pat<(add GR8:$src, -1), (DEC8r GR8:$src)>;
2104 def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
2105 def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
2106 def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
2108 def : Pat<(X86add_flag_nocf GR8:$src, -1), (DEC8r GR8:$src)>;
2109 def : Pat<(X86add_flag_nocf GR16:$src, -1), (DEC16r GR16:$src)>;
2110 def : Pat<(X86add_flag_nocf GR32:$src, -1), (DEC32r GR32:$src)>;
2111 def : Pat<(X86add_flag_nocf GR64:$src, -1), (DEC64r GR64:$src)>;
2112 def : Pat<(X86sub_flag_nocf GR8:$src, -1), (INC8r GR8:$src)>;
2113 def : Pat<(X86sub_flag_nocf GR16:$src, -1), (INC16r GR16:$src)>;
2114 def : Pat<(X86sub_flag_nocf GR32:$src, -1), (INC32r GR32:$src)>;
2115 def : Pat<(X86sub_flag_nocf GR64:$src, -1), (INC64r GR64:$src)>;
2116 } // Predicates = [UseIncDec]
2119 def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr GR8 :$src1, GR8 :$src2)>;
2120 def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
2121 def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
2122 def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;
2125 def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
2126 (OR8rm GR8:$src1, addr:$src2)>;
2127 def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
2128 (OR16rm GR16:$src1, addr:$src2)>;
2129 def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
2130 (OR32rm GR32:$src1, addr:$src2)>;
2131 def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
2132 (OR64rm GR64:$src1, addr:$src2)>;
2135 def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri GR8 :$src1, imm:$src2)>;
2136 def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
2137 def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
2138 def : Pat<(or GR64:$src1, i64immSExt32:$src2),
2139 (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
2142 def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr GR8 :$src1, GR8 :$src2)>;
2143 def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
2144 def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
2145 def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;
2148 def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
2149 (XOR8rm GR8:$src1, addr:$src2)>;
2150 def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
2151 (XOR16rm GR16:$src1, addr:$src2)>;
2152 def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
2153 (XOR32rm GR32:$src1, addr:$src2)>;
2154 def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
2155 (XOR64rm GR64:$src1, addr:$src2)>;
2158 def : Pat<(xor GR8:$src1, imm:$src2),
2159 (XOR8ri GR8:$src1, imm:$src2)>;
2160 def : Pat<(xor GR16:$src1, imm:$src2),
2161 (XOR16ri GR16:$src1, imm:$src2)>;
2162 def : Pat<(xor GR32:$src1, imm:$src2),
2163 (XOR32ri GR32:$src1, imm:$src2)>;
2164 def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
2165 (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
2168 def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr GR8 :$src1, GR8 :$src2)>;
2169 def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
2170 def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
2171 def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;
2174 def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
2175 (AND8rm GR8:$src1, addr:$src2)>;
2176 def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
2177 (AND16rm GR16:$src1, addr:$src2)>;
2178 def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
2179 (AND32rm GR32:$src1, addr:$src2)>;
2180 def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
2181 (AND64rm GR64:$src1, addr:$src2)>;
2184 def : Pat<(and GR8:$src1, imm:$src2),
2185 (AND8ri GR8:$src1, imm:$src2)>;
2186 def : Pat<(and GR16:$src1, imm:$src2),
2187 (AND16ri GR16:$src1, imm:$src2)>;
2188 def : Pat<(and GR32:$src1, imm:$src2),
2189 (AND32ri GR32:$src1, imm:$src2)>;
2190 def : Pat<(and GR64:$src1, i64immSExt32:$src2),
2191 (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
2193 // Bit scan instruction patterns to match explicit zero-undef behavior.
2194 def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
2195 def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
2196 def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
2197 def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
2198 def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
2199 def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
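// BSF leaves its destination undefined (and sets ZF) when the source is zero,
// which matches the cttz_zero_undef contract; a cttz that must handle a zero
// input is lowered differently.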
2201 // When HasMOVBE is enabled it is possible to get a non-legalized
2202 // register-register 16-bit bswap. This maps it to a ROL instruction.
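// Rotating a 16-bit register by 8 swaps its two bytes, e.g. "rolw $8, %ax"
// turns 0x1234 into 0x3412.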
2203 let Predicates = [HasMOVBE] in {
2204 def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;