//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "x86-isel"
STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

static cl::opt<bool> AndImmShrink("x86-and-imm-shrink", cl::init(true),
    cl::desc("Enable setting constant bits to reduce size of mask immediates"),
    cl::Hidden);
//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// This corresponds to X86AddressMode, but uses SDValue's instead of register
  /// numbers for the leaves of the matched tree.
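  /// For example, a reference such as "GV + 8(%rbx,%rcx,4)" would be
  /// represented here with BaseType == RegBase, Base_Reg == %rbx, Scale == 4,
  /// IndexReg == %rcx, Disp == 8, and GV pointing at the global.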
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    MCSymbol *MCSym;
    int JT;
    unsigned Align;            // CP alignment.
    unsigned char SymbolFlags; // X86II::MO_*
    bool NegateIndex = false;

    X86ISelAddressMode()
        : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
          Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
          MCSym(nullptr), JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {}
    bool hasSymbolicDisplacement() const {
      return GV != nullptr || CP != nullptr || ES != nullptr ||
             MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
    }
    bool hasBaseOrIndexReg() const {
      return BaseType == FrameIndexBase ||
             IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
    }
    /// Return true if this addressing mode is already RIP-relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
              dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }
    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump(SelectionDAG *DAG = nullptr) {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode())
        Base_Reg.getNode()->dump(DAG);
      if (BaseType == FrameIndexBase)
        dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
      dbgs() << " Scale " << Scale << '\n';
      if (IndexReg.getNode())
        IndexReg.getNode()->dump(DAG);
      dbgs() << " Disp " << Disp << '\n';
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
#endif
  };
} // end anonymous namespace
//===--------------------------------------------------------------------===//
/// ISel - X86-specific code to select X86 machine instructions for
/// SelectionDAG operations.
///
namespace {
  class X86DAGToDAGISel final : public SelectionDAGISel {
    /// Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// If true, selector should try to optimize for code size instead of
    /// performance.
    bool OptForSize;

    /// If true, selector should try to optimize for minimum code size.
    bool OptForMinSize;

    /// Disable direct TLS access through segment registers.
    bool IndirectTlsSegRefs;
  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
        : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr), OptForSize(false),
          OptForMinSize(false), IndirectTlsSegRefs(false) {}
    StringRef getPassName() const override {
      return "X86 DAG->DAG Instruction Selection";
    }
    bool runOnMachineFunction(MachineFunction &MF) override {
      // Reset the subtarget each time through.
      Subtarget = &MF.getSubtarget<X86Subtarget>();
      IndirectTlsSegRefs = MF.getFunction().hasFnAttribute(
                               "indirect-tls-seg-refs");

      // OptFor[Min]Size are used in pattern predicates that isel is matching.
      OptForSize = MF.getFunction().hasOptSize();
      OptForMinSize = MF.getFunction().hasMinSize();
      assert((!OptForMinSize || OptForSize) &&
             "OptForMinSize implies OptForSize");

      SelectionDAGISel::runOnMachineFunction(MF);
      return true;
    }
    void EmitFunctionEntryCode() override;

    bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

    void PreprocessISelDAG() override;
    void PostprocessISelDAG() override;

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    void Select(SDNode *N) override;
    bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool matchAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchAdd(SDValue &N, X86ISelAddressMode &AM, unsigned Depth);
    bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
                          SDValue &Scale, SDValue &Index, SDValue &Disp,
                          SDValue &Segment);
    bool selectMOV64Imm32(SDValue N, SDValue &Imm);
    bool selectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool selectLEA64_32Addr(SDValue N, SDValue &Base,
                            SDValue &Scale, SDValue &Index, SDValue &Disp,
                            SDValue &Segment);
    bool selectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool selectScalarSSELoad(SDNode *Root, SDNode *Parent, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);
    bool selectRelocImm(SDValue N, SDValue &Op);

    bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    // Convenience method where P is also root.
    bool tryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment) {
      return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
    }
    /// Implement addressing mode selection for inline asm expressions.
    bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                      unsigned ConstraintID,
                                      std::vector<SDValue> &OutOps) override;

    void emitSpecialCodeForMain();
    inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
                                   MVT VT, SDValue &Base, SDValue &Scale,
                                   SDValue &Index, SDValue &Disp,
                                   SDValue &Segment) {
      if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
        Base = CurDAG->getTargetFrameIndex(
            AM.Base_FrameIndex, TLI->getPointerTy(CurDAG->getDataLayout()));
      else if (AM.Base_Reg.getNode())
        Base = AM.Base_Reg;
      else
        Base = CurDAG->getRegister(0, VT);
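      // Note: register number 0 serves as the "no register" placeholder in
      // the five-operand (Base, Scale, Index, Disp, Segment) X86 memory
      // operand produced here.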
      Scale = getI8Imm(AM.Scale, DL);

      // Negate the index if needed.
      if (AM.NegateIndex) {
        unsigned NegOpc = VT == MVT::i64 ? X86::NEG64r : X86::NEG32r;
        SDValue Neg = SDValue(CurDAG->getMachineNode(NegOpc, DL, VT, MVT::i32,
                                                     AM.IndexReg), 0);
        AM.IndexReg = Neg;
      }

      if (AM.IndexReg.getNode())
        Index = AM.IndexReg;
      else
        Index = CurDAG->getRegister(0, VT);

      // These are 32-bit even in 64-bit mode since RIP-relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.MCSym) {
        assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
        assert(AM.SymbolFlags == 0 && "oo");
        Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i16);
    }
    // Utility function to determine whether we should avoid selecting
    // immediate forms of instructions for better code size or not.
    // At a high level, we'd like to avoid such instructions when
    // we have similar constants used within the same basic block
    // that can be kept in a register.
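    // For example, if the same 32-bit constant is stored to memory and also
    // added to a register in one block, materializing it once with
    // "movl $imm, %ecx" and reusing the register is smaller than encoding the
    // 4-byte immediate in both instructions.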
    bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
      uint32_t UseCount = 0;

      // Do not want to hoist if we're not optimizing for size.
      // TODO: We'd like to remove this restriction.
      // See the comment in X86InstrInfo.td for more info.
      if (!OptForSize)
        return false;

      // Walk all the users of the immediate.
      for (SDNode::use_iterator UI = N->use_begin(),
           UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {

        SDNode *User = *UI;

        // This user is already selected. Count it as a legitimate use and
        // move on.
        if (User->isMachineOpcode()) {
          UseCount++;
          continue;
        }

        // We want to count stores of immediates as real uses.
        if (User->getOpcode() == ISD::STORE &&
            User->getOperand(1).getNode() == N) {
          UseCount++;
          continue;
        }

        // We don't currently match users that have > 2 operands (except
        // for stores, which are handled above).
        // Those instructions won't match in ISEL, for now, and would
        // be counted incorrectly.
        // This may change in the future as we add additional instruction
        // types.
        if (User->getNumOperands() != 2)
          continue;

        // If this can match to INC/DEC, don't count it as a use.
        if (User->getOpcode() == ISD::ADD &&
            (isOneConstant(SDValue(N, 0)) || isAllOnesConstant(SDValue(N, 0))))
          continue;

        // Immediates that are used for offsets as part of stack
        // manipulation should be left alone. These are typically
        // used to indicate SP offsets for argument passing and
        // will get pulled into stores/pushes (implicitly).
        if (User->getOpcode() == X86ISD::ADD ||
            User->getOpcode() == ISD::ADD    ||
            User->getOpcode() == X86ISD::SUB ||
            User->getOpcode() == ISD::SUB) {

          // Find the other operand of the add/sub.
          SDValue OtherOp = User->getOperand(0);
          if (OtherOp.getNode() == N)
            OtherOp = User->getOperand(1);

          // Don't count if the other operand is SP.
          RegisterSDNode *RegNode;
          if (OtherOp->getOpcode() == ISD::CopyFromReg &&
              (RegNode = dyn_cast_or_null<RegisterSDNode>(
                   OtherOp->getOperand(1).getNode())))
            if ((RegNode->getReg() == X86::ESP) ||
                (RegNode->getReg() == X86::RSP))
              continue;
        }

        // ... otherwise, count this and move on.
        UseCount++;
      }

      // If we have more than 1 use, then recommend for hoisting.
      return (UseCount > 1);
    }
    /// Return a target constant with the specified value of type i8.
    inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
    }

    /// Return a target constant with the specified value, of type i32.
    inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
    }

    /// Return a target constant with the specified value, of type i64.
    inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
    }
    SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
                                        const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(1);
      MVT VecVT = N->getOperand(0).getSimpleValueType();
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }
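    // For example, extracting the upper half of a v8i32 as a 128-bit
    // subvector starts at element 4, giving immediate (4 * 32) / 128 = 1.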
    SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
                                      const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(2);
      MVT VecVT = N->getSimpleValueType(0);
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }
    // Helper to detect unneeded 'and' instructions on shift amounts. Called
    // from PatFrags in tablegen.
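    // For example, a 32-bit shift only uses the low 5 bits of its amount, so
    // with Width == 5 an AND whose mask has the low 5 bits set (e.g.
    // "and $31, %cl") is redundant.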
    bool isUnneededShiftMask(SDNode *N, unsigned Width) const {
      assert(N->getOpcode() == ISD::AND && "Unexpected opcode");
      const APInt &Val =
          cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();

      if (Val.countTrailingOnes() >= Width)
        return true;

      APInt Mask = Val | CurDAG->computeKnownBits(N->getOperand(0)).Zero;
      return Mask.countTrailingOnes() >= Width;
    }
    /// Return an SDNode that returns the value of the global base register.
    /// Output instructions required to initialize the global base register,
    /// if necessary.
    SDNode *getGlobalBaseReg();
    /// Return a reference to the TargetMachine, casted to the target-specific
    /// type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// Return a reference to the TargetInstrInfo, casted to the
    /// target-specific type.
    const X86InstrInfo *getInstrInfo() const {
      return Subtarget->getInstrInfo();
    }
    /// Address-mode matching performs shift-of-and to and-of-shift
    /// reassociation in order to expose more scaled addressing
    /// opportunities.
    bool ComplexPatternFuncMutatesDAG() const override {
      return true;
    }

    bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;
    /// Returns whether this is a relocatable immediate in the range
    /// [-2^(Width-1) .. 2^(Width-1)-1], matching the isInt<Width> check below.
    template <unsigned Width> bool isSExtRelocImm(SDNode *N) const {
      if (auto *CN = dyn_cast<ConstantSDNode>(N))
        return isInt<Width>(CN->getSExtValue());
      return isSExtAbsoluteSymbolRef(Width, N);
    }
    // Indicates we should prefer to use a non-temporal load for this load.
    bool useNonTemporalLoad(LoadSDNode *N) const {
      if (!N->isNonTemporal())
        return false;

      unsigned StoreSize = N->getMemoryVT().getStoreSize();

      if (N->getAlignment() < StoreSize)
        return false;
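      // Non-temporal *load* instructions only exist at certain widths:
      // MOVNTDQA (SSE4.1) covers 16 bytes, and VMOVNTDQA covers 32 bytes
      // (AVX2) and 64 bytes (AVX512). The 4- and 8-byte non-temporal hints
      // only have store forms.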
      switch (StoreSize) {
      default: llvm_unreachable("Unsupported store size");
      case 4:
      case 8:
        return false;
      case 16:
        return Subtarget->hasSSE41();
      case 32:
        return Subtarget->hasAVX2();
      case 64:
        return Subtarget->hasAVX512();
      }
    }
    bool foldLoadStoreIntoMemOperand(SDNode *Node);
    MachineSDNode *matchBEXTRFromAndImm(SDNode *Node);
    bool matchBitExtract(SDNode *Node);
    bool shrinkAndImmediate(SDNode *N);
    bool isMaskZeroExtended(SDNode *N) const;
    bool tryShiftAmountMod(SDNode *N);
    bool combineIncDecVector(SDNode *Node);
    bool tryShrinkShlLogicImm(SDNode *N);
    bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);

    MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node);
    MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node,
                                SDValue &InFlag);

    bool tryOptimizeRem8Extend(SDNode *N);

    bool onlyUsesZeroFlag(SDValue Flags) const;
    bool hasNoSignFlagUses(SDValue Flags) const;
    bool hasNoCarryFlagUses(SDValue Flags) const;
  };
} // end anonymous namespace
529 // Returns true if this masked compare can be implemented legally with this
531 static bool isLegalMaskCompare(SDNode
*N
, const X86Subtarget
*Subtarget
) {
532 unsigned Opcode
= N
->getOpcode();
533 if (Opcode
== X86ISD::CMPM
|| Opcode
== ISD::SETCC
||
534 Opcode
== X86ISD::CMPM_SAE
|| Opcode
== X86ISD::VFPCLASS
) {
535 // We can get 256-bit 8 element types here without VLX being enabled. When
536 // this happens we will use 512-bit operations and the mask will not be
538 EVT OpVT
= N
->getOperand(0).getValueType();
539 if (OpVT
.is256BitVector() || OpVT
.is128BitVector())
540 return Subtarget
->hasVLX();
544 // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
545 if (Opcode
== X86ISD::VFPCLASSS
|| Opcode
== X86ISD::FSETCCM
||
546 Opcode
== X86ISD::FSETCCM_SAE
)
// Returns true if we can assume the writer of the mask has zero extended it
// for us.
bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
  // If this is an AND, check if we have a compare on either side. As long as
  // one side guarantees the mask is zero extended, the AND will preserve those
  // zeros.
  if (N->getOpcode() == ISD::AND)
    return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
           isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);

  return isLegalMaskCompare(N, Subtarget);
}
bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // Don't fold non-temporal loads if we have an instruction for them.
  if (useNonTemporalLoad(cast<LoadSDNode>(N)))
    return false;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);
      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size. For example:
      //   movl 4(%esp), %eax
      //   addl $4, %eax
      // vs.
      //   movl $4, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;
        // If this is a 64-bit AND with an immediate that fits in 32-bits,
        // prefer using the smaller AND over folding the load. This is needed
        // to make sure immediates created by shrinkAndImmediate are always
        // folded. Ideally we would narrow the load during DAG combine and get
        // the best of both worlds.
        if (U->getOpcode() == ISD::AND &&
            Imm->getAPIntValue().getBitWidth() == 64 &&
            Imm->getAPIntValue().isIntN(32))
          return false;
        // If this is really a zext_inreg that can be represented with a movzx
        // instruction, prefer that.
        // TODO: We could shrink the load and fold if it is non-volatile.
        if (U->getOpcode() == ISD::AND &&
            (Imm->getAPIntValue() == UINT8_MAX ||
             Imm->getAPIntValue() == UINT16_MAX ||
             Imm->getAPIntValue() == UINT32_MAX))
          return false;
        // ADD/SUB can negate the immediate and use the opposite operation
        // to fit 128 into a sign-extended 8-bit immediate.
        if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB) &&
            (-Imm->getAPIntValue()).isSignedIntN(8))
          return false;
      }
      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      //   movl    %gs:0, %eax
      //   leal    i@NTPOFF(%eax), %eax
      // instead of
      //   movl    $i@NTPOFF, %eax
      //   addl    %gs:0, %eax
      // If the block also has an access to a second TLS address this will
      // save a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
      // Don't fold load if this matches the BTS/BTR/BTC patterns.
      // BTS: (or X, (shl 1, n))
      // BTR: (and X, (rotl -2, n))
      // BTC: (xor X, (shl 1, n))
      if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
        if (U->getOperand(0).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(0).getOperand(0)))
          return false;

        if (U->getOperand(1).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(1).getOperand(0)))
          return false;
      }
      if (U->getOpcode() == ISD::AND) {
        SDValue U0 = U->getOperand(0);
        SDValue U1 = U->getOperand(1);
        if (U0.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }

        if (U1.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }
      }

      break;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      // Don't fold a load into a shift by immediate. The BMI2 instructions
      // support folding a load, but not an immediate. The legacy instructions
      // support folding an immediate, but can't fold a load. Folding an
      // immediate is preferable to folding a load.
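      // For example, BMI2 "shlxq %rcx, (%rdi), %rax" can take the shifted
      // value from memory but only with the count in a register, while the
      // legacy "shlq $3, %rax" encodes the count as an immediate but needs
      // the value already in a register.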
      if (isa<ConstantSDNode>(U->getOperand(1)))
        return false;

      break;
    }
  }
  // Prevent folding a load if this can be implemented with an insert_subreg
  // or a move that implicitly zeroes.
  if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
      isNullConstant(Root->getOperand(2)) &&
      (Root->getOperand(0).isUndef() ||
       ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
    return false;

  return true;
}
/// Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
        CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  Ops.append(Call->op_begin() + 1, Call->op_end());
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}
/// Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain, this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a
  // store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}
void X86DAGToDAGISel::PreprocessISelDAG() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // If this is a target specific AND node with no flag usages, turn it back
    // into ISD::AND to enable test instruction matching.
    if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
      SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }
->getOpcode()) {
801 case ISD::FP_TO_SINT
:
802 case ISD::FP_TO_UINT
: {
803 // Replace vector fp_to_s/uint with their X86 specific equivalent so we
804 // don't need 2 sets of patterns.
805 if (!N
->getSimpleValueType(0).isVector())
809 switch (N
->getOpcode()) {
810 default: llvm_unreachable("Unexpected opcode!");
811 case ISD::FP_TO_SINT
: NewOpc
= X86ISD::CVTTP2SI
; break;
812 case ISD::FP_TO_UINT
: NewOpc
= X86ISD::CVTTP2UI
; break;
814 SDValue Res
= CurDAG
->getNode(NewOpc
, SDLoc(N
), N
->getValueType(0),
817 CurDAG
->ReplaceAllUsesOfValueWith(SDValue(N
, 0), Res
);
819 CurDAG
->DeleteNode(N
);
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL: {
      // Replace vector shifts with their X86 specific equivalent so we don't
      // need 2 sets of patterns.
      if (!N->getValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::SHL: NewOpc = X86ISD::VSHLV; break;
      case ISD::SRA: NewOpc = X86ISD::VSRAV; break;
      case ISD::SRL: NewOpc = X86ISD::VSRLV; break;
      }
      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }
    case ISD::ANY_EXTEND:
    case ISD::ANY_EXTEND_VECTOR_INREG: {
      // Replace vector any extend with the zero extend equivalents so we
      // don't need 2 sets of patterns. Ignore vXi1 extensions.
      if (!N->getValueType(0).isVector() ||
          N->getOperand(0).getScalarValueSizeInBits() == 1)
        break;

      unsigned NewOpc = N->getOpcode() == ISD::ANY_EXTEND
                            ? ISD::ZERO_EXTEND
                            : ISD::ZERO_EXTEND_VECTOR_INREG;

      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }
    case ISD::FCEIL:
    case ISD::FFLOOR:
    case ISD::FTRUNC:
    case ISD::FNEARBYINT:
    case ISD::FRINT: {
      // Replace fp rounding with their X86 specific equivalent so we don't
      // need 2 sets of patterns.
      unsigned Imm;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::FCEIL:      Imm = 0xA; break;
      case ISD::FFLOOR:     Imm = 0x9; break;
      case ISD::FTRUNC:     Imm = 0xB; break;
      case ISD::FNEARBYINT: Imm = 0xC; break;
      case ISD::FRINT:      Imm = 0x4; break;
      }
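      // The immediate uses the SSE4.1 ROUND* / AVX-512 VRNDSCALE control
      // bits: bits 1:0 pick the rounding mode (01 down, 10 up, 11 toward
      // zero), bit 2 selects the current MXCSR mode, and bit 3 suppresses
      // precision exceptions. Hence 0x9/0xA/0xB for floor/ceil/trunc, 0xC for
      // nearbyint (MXCSR mode, inexact suppressed), and 0x4 for rint.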
      SDLoc dl(N);
      SDValue Res = CurDAG->getNode(
          X86ISD::VRNDSCALE, dl, N->getValueType(0), N->getOperand(0),
          CurDAG->getTargetConstant(Imm, dl, MVT::i8));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }
    case X86ISD::FANDN:
    case X86ISD::FAND:
    case X86ISD::FOR:
    case X86ISD::FXOR: {
      // Widen scalar fp logic ops to vector to reduce isel patterns.
      // FIXME: Can we do this during lowering/combine?
      MVT VT = N->getSimpleValueType(0);
      if (VT.isVector() || VT == MVT::f128)
        break;

      MVT VecVT = VT == MVT::f64 ? MVT::v2f64 : MVT::v4f32;
      SDLoc dl(N);
      SDValue Op0 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(0));
      SDValue Op1 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(1));

      SDValue Res;
      if (Subtarget->hasSSE2()) {
        EVT IntVT = EVT(VecVT).changeVectorElementTypeToInteger();
        Op0 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op0);
        Op1 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op1);
        unsigned Opc;
        switch (N->getOpcode()) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86ISD::FANDN: Opc = X86ISD::ANDNP; break;
        case X86ISD::FAND:  Opc = ISD::AND;      break;
        case X86ISD::FOR:   Opc = ISD::OR;       break;
        case X86ISD::FXOR:  Opc = ISD::XOR;      break;
        }
        Res = CurDAG->getNode(Opc, dl, IntVT, Op0, Op1);
        Res = CurDAG->getNode(ISD::BITCAST, dl, VecVT, Res);
      } else {
        Res = CurDAG->getNode(N->getOpcode(), dl, VecVT, Op0, Op1);
      }
      Res = CurDAG->getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res,
                            CurDAG->getIntPtrConstant(0, dl));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }
    }
    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target can fold the load into the call or
        // jmp.
        !Subtarget->useRetpolineIndirectCalls() &&
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          (Subtarget->is64Bit() ||
           !getTargetMachine().isPositionIndependent())))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }
    // Lower fpround and fpextend nodes that target the FP stack to be stores
    // and loads to the stack. This is a gross hack. We would like to simply
    // mark these as being illegal, but when we do that, legalize produces
    // these when it expands calls, then expands these in the same legalize
    // pass. We would like dag combine to be able to hack on these between the
    // call expansion and the node legalization. As such this pass basically
    // does "really late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    switch (N->getOpcode()) {
    default: continue;
    case ISD::FP_ROUND:
    case ISD::FP_EXTEND:
    {
      MVT SrcVT = N->getOperand(0).getSimpleValueType();
      MVT DstVT = N->getSimpleValueType(0);

      // If any of the sources are vectors, no fp stack involved.
      if (SrcVT.isVector() || DstVT.isVector())
        continue;

      // If the source and destination are SSE registers, then this is a legal
      // conversion that should not be lowered.
      const X86TargetLowering *X86Lowering =
          static_cast<const X86TargetLowering *>(TLI);
      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
      if (SrcIsSSE && DstIsSSE)
        continue;

      if (!SrcIsSSE && !DstIsSSE) {
        // If this is an FPStack extension, it is a noop.
        if (N->getOpcode() == ISD::FP_EXTEND)
          continue;
        // If this is a value-preserving FPStack truncation, it is a noop.
        if (N->getConstantOperandVal(1))
          continue;
      }

      // Here we could have an FP stack truncation or an FPStack <-> SSE
      // convert. FPStack has extload and truncstore. SSE can fold direct
      // loads into other operations. Based on this, decide what we want to
      // do.
      MVT MemVT;
      if (N->getOpcode() == ISD::FP_ROUND)
        MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
      else
        MemVT = SrcIsSSE ? SrcVT : DstVT;

      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
      SDLoc dl(N);

      // FIXME: optimize the case where the src/dest is a load or store?

      SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                            N->getOperand(0), MemTmp,
                                            MachinePointerInfo(), MemVT);
      SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store,
                                          MemTmp, MachinePointerInfo(), MemVT);

      // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
      // extload we created. This will cause general havok on the dag because
      // anything below the conversion could be folded into other existing
      // nodes. To avoid invalidating 'I', back it up to the convert node.
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      break;
    }
    // The sequence of events for lowering STRICT_FP versions of these nodes
    // requires dealing with the chain differently, as there is already a
    // preexisting chain.
    case ISD::STRICT_FP_ROUND:
    case ISD::STRICT_FP_EXTEND:
    {
      MVT SrcVT = N->getOperand(1).getSimpleValueType();
      MVT DstVT = N->getSimpleValueType(0);

      // If any of the sources are vectors, no fp stack involved.
      if (SrcVT.isVector() || DstVT.isVector())
        continue;

      // If the source and destination are SSE registers, then this is a legal
      // conversion that should not be lowered.
      const X86TargetLowering *X86Lowering =
          static_cast<const X86TargetLowering *>(TLI);
      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
      if (SrcIsSSE && DstIsSSE)
        continue;

      if (!SrcIsSSE && !DstIsSSE) {
        // If this is an FPStack extension, it is a noop.
        if (N->getOpcode() == ISD::STRICT_FP_EXTEND)
          continue;
        // If this is a value-preserving FPStack truncation, it is a noop.
        if (N->getConstantOperandVal(2))
          continue;
      }

      // Here we could have an FP stack truncation or an FPStack <-> SSE
      // convert. FPStack has extload and truncstore. SSE can fold direct
      // loads into other operations. Based on this, decide what we want to
      // do.
      MVT MemVT;
      if (N->getOpcode() == ISD::STRICT_FP_ROUND)
        MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
      else
        MemVT = SrcIsSSE ? SrcVT : DstVT;

      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
      SDLoc dl(N);

      // FIXME: optimize the case where the src/dest is a load or store?

      // Since the operation is StrictFP, use the preexisting chain.
      SDValue Store = CurDAG->getTruncStore(N->getOperand(0), dl,
                                            N->getOperand(1), MemTmp,
                                            MachinePointerInfo(), MemVT);
      SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store,
                                          MemTmp, MachinePointerInfo(), MemVT);

      // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
      // extload we created. This will cause general havok on the dag because
      // anything below the conversion could be folded into other existing
      // nodes. To avoid invalidating 'I', back it up to the convert node.
      --I;
      CurDAG->ReplaceAllUsesWith(N, Result.getNode());
      break;
    }
    }
    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }

  // The load+call transform above can leave some dead nodes in the graph.
  // Make sure we remove them. It's possible some of the other transforms do
  // too, so just remove dead nodes unconditionally.
  CurDAG->RemoveDeadNodes();
}
// Look for a redundant movzx/movsx that can occur after an 8-bit divrem.
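// For example, an i8 division leaves the remainder in %ah; it is read with a
// MOVSX32rr8_NOREX/MOVZX32rr8_NOREX of %ah plus an EXTRACT_SUBREG of the low
// byte, so a later movzx/movsx of that byte repeats an extension that was
// already done.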
bool X86DAGToDAGISel::tryOptimizeRem8Extend(SDNode *N) {
  unsigned Opc = N->getMachineOpcode();
  if (Opc != X86::MOVZX32rr8 && Opc != X86::MOVSX32rr8 &&
      Opc != X86::MOVSX64rr8)
    return false;

  SDValue N0 = N->getOperand(0);

  // We need to be extracting the lower byte of an extend.
  if (!N0.isMachineOpcode() ||
      N0.getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG ||
      N0.getConstantOperandVal(1) != X86::sub_8bit)
    return false;

  // We're looking for either a movsx or movzx to match the original opcode.
  unsigned ExpectedOpc = Opc == X86::MOVZX32rr8 ? X86::MOVZX32rr8_NOREX
                                                : X86::MOVSX32rr8_NOREX;
  SDValue N00 = N0.getOperand(0);
  if (!N00.isMachineOpcode() || N00.getMachineOpcode() != ExpectedOpc)
    return false;

  if (Opc == X86::MOVSX64rr8) {
    // If we had a sign extend from 8 to 64 bits, we still need to go from 32
    // to 64.
    MachineSDNode *Extend = CurDAG->getMachineNode(X86::MOVSX64rr32, SDLoc(N),
                                                   MVT::i64, N00);
    ReplaceUses(N, Extend);
  } else {
    // Ok we can drop this extend and just use the original extend.
    ReplaceUses(N, N00.getNode());
  }

  return true;
}
void X86DAGToDAGISel::PostprocessISelDAG() {
  // Skip peepholes at -O0.
  if (TM.getOptLevel() == CodeGenOpt::None)
    return;

  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    if (tryOptimizeRem8Extend(N)) {
      MadeChange = true;
      continue;
    }
    // Look for a TESTrr+ANDrr pattern where both operands of the test are
    // the same. Rewrite to remove the AND.
    unsigned Opc = N->getMachineOpcode();
    if ((Opc == X86::TEST8rr || Opc == X86::TEST16rr ||
         Opc == X86::TEST32rr || Opc == X86::TEST64rr) &&
        N->getOperand(0) == N->getOperand(1) &&
        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
        N->getOperand(0).isMachineOpcode()) {
      SDValue And = N->getOperand(0);
      unsigned N0Opc = And.getMachineOpcode();
      if (N0Opc == X86::AND8rr || N0Opc == X86::AND16rr ||
          N0Opc == X86::AND32rr || N0Opc == X86::AND64rr) {
        MachineSDNode *Test = CurDAG->getMachineNode(Opc, SDLoc(N),
                                                     MVT::i32,
                                                     And.getOperand(0),
                                                     And.getOperand(1));
        ReplaceUses(N, Test);
        MadeChange = true;
        continue;
      }
== X86::AND8rm
|| N0Opc
== X86::AND16rm
||
1188 N0Opc
== X86::AND32rm
|| N0Opc
== X86::AND64rm
) {
1191 case X86::AND8rm
: NewOpc
= X86::TEST8mr
; break;
1192 case X86::AND16rm
: NewOpc
= X86::TEST16mr
; break;
1193 case X86::AND32rm
: NewOpc
= X86::TEST32mr
; break;
1194 case X86::AND64rm
: NewOpc
= X86::TEST64mr
; break;
1197 // Need to swap the memory and register operand.
1198 SDValue Ops
[] = { And
.getOperand(1),
1204 And
.getOperand(6) /* Chain */ };
1205 MachineSDNode
*Test
= CurDAG
->getMachineNode(NewOpc
, SDLoc(N
),
1206 MVT::i32
, MVT::Other
, Ops
);
1207 ReplaceUses(N
, Test
);
    // Look for a KAND+KORTEST and turn it into KTEST if only the zero flag is
    // used. We're doing this late so we can prefer to fold the AND into masked
    // comparisons. Doing that can be better for the live range of the mask
    // register.
    if ((Opc == X86::KORTESTBrr || Opc == X86::KORTESTWrr ||
         Opc == X86::KORTESTDrr || Opc == X86::KORTESTQrr) &&
        N->getOperand(0) == N->getOperand(1) &&
        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
        N->getOperand(0).isMachineOpcode() &&
        onlyUsesZeroFlag(SDValue(N, 0))) {
      SDValue And = N->getOperand(0);
      unsigned N0Opc = And.getMachineOpcode();
      // KANDW is legal with AVX512F, but KTESTW requires AVX512DQ. The other
      // KAND instructions and KTEST use the same ISA feature.
      if (N0Opc == X86::KANDBrr ||
          (N0Opc == X86::KANDWrr && Subtarget->hasDQI()) ||
          N0Opc == X86::KANDDrr || N0Opc == X86::KANDQrr) {
        unsigned NewOpc;
        switch (Opc) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86::KORTESTBrr: NewOpc = X86::KTESTBrr; break;
        case X86::KORTESTWrr: NewOpc = X86::KTESTWrr; break;
        case X86::KORTESTDrr: NewOpc = X86::KTESTDrr; break;
        case X86::KORTESTQrr: NewOpc = X86::KTESTQrr; break;
        }
        MachineSDNode *KTest = CurDAG->getMachineNode(NewOpc, SDLoc(N),
                                                      MVT::i32,
                                                      And.getOperand(0),
                                                      And.getOperand(1));
        ReplaceUses(N, KTest);
        MadeChange = true;
        continue;
      }
    }
    // Attempt to remove vector moves that were inserted to zero upper bits.
    if (Opc != TargetOpcode::SUBREG_TO_REG)
      continue;

    unsigned SubRegIdx = N->getConstantOperandVal(2);
    if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
      continue;

    SDValue Move = N->getOperand(1);
    if (!Move.isMachineOpcode())
      continue;

    // Make sure it's one of the move opcodes we recognize.
    switch (Move.getMachineOpcode()) {
    default:
      continue;
    case X86::VMOVAPDrr:       case X86::VMOVUPDrr:
    case X86::VMOVAPSrr:       case X86::VMOVUPSrr:
    case X86::VMOVDQArr:       case X86::VMOVDQUrr:
    case X86::VMOVAPDYrr:      case X86::VMOVUPDYrr:
    case X86::VMOVAPSYrr:      case X86::VMOVUPSYrr:
    case X86::VMOVDQAYrr:      case X86::VMOVDQUYrr:
    case X86::VMOVAPDZ128rr:   case X86::VMOVUPDZ128rr:
    case X86::VMOVAPSZ128rr:   case X86::VMOVUPSZ128rr:
    case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
    case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
    case X86::VMOVAPDZ256rr:   case X86::VMOVUPDZ256rr:
    case X86::VMOVAPSZ256rr:   case X86::VMOVUPSZ256rr:
    case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
    case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
      break;
    }

    SDValue In = Move.getOperand(0);
    if (!In.isMachineOpcode() ||
        In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
      continue;

    // Make sure the instruction has a VEX, XOP, or EVEX prefix. This excludes
    // the SHA instructions, which use a legacy encoding.
    uint64_t TSFlags = getInstrInfo()->get(In.getMachineOpcode()).TSFlags;
    if ((TSFlags & X86II::EncodingMask) != X86II::VEX &&
        (TSFlags & X86II::EncodingMask) != X86II::EVEX &&
        (TSFlags & X86II::EncodingMask) != X86II::XOP)
      continue;

    // Producing instruction is another vector instruction. We can drop the
    // move.
    CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));
    MadeChange = true;
  }

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}
/// Emit any code that needs to be executed only in the main function.
void X86DAGToDAGISel::emitSpecialCodeForMain() {
  if (Subtarget->isTargetCygMing()) {
    TargetLowering::ArgListTy Args;
    auto &DL = CurDAG->getDataLayout();

    TargetLowering::CallLoweringInfo CLI(*CurDAG);
    CLI.setChain(CurDAG->getRoot())
        .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
                   CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
                   std::move(Args));
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    CurDAG->setRoot(Result.second);
  }
}
void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  const Function &F = MF->getFunction();
  if (F.hasExternalLinkage() && F.getName() == "main")
    emitSpecialCodeForMain();
}
static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}
bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  // If there's no offset to fold, we don't need to do any work.
  if (Offset == 0)
    return false;

  // Cannot combine ExternalSymbol displacements with integer offsets.
  if (AM.ES || AM.MCSym)
    return true;

  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}
bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N,
                                         X86ISelAddressMode &AM) {
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
        !IndirectTlsSegRefs &&
        (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
         Subtarget->isTargetFuchsia()))
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      // Address space 258 is not handled here, because it is not used to
      // address TLS areas.
      }

  return true;
}
/// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an
/// addressing mode. These wrap things that will resolve down into a symbol
/// reference. If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  bool IsRIPRelTLS = false;
  bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
  if (IsRIPRel) {
    SDValue Val = N.getOperand(0);
    if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
      IsRIPRelTLS = true;
  }

  // We can't use an addressing mode in the 64-bit large code model.
  // Global TLS addressing is an exception. In the medium code model,
  // we can use such a mode when RIP wrappers are present.
  // That signifies access to globals that are known to be "near",
  // such as the GOT itself.
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit() &&
      ((M == CodeModel::Large && !IsRIPRelTLS) ||
       (M == CodeModel::Medium && !IsRIPRel)))
    return true;

  // Base and index reg must be 0 in order to use %rip as base.
  if (IsRIPRel && AM.hasBaseOrIndexReg())
    return true;

  // Make a local copy in case we can't do this fold.
  X86ISelAddressMode Backup = AM;

  int64_t Offset = 0;
  SDValue N0 = N.getOperand(0);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
    AM.GV = G->getGlobal();
    AM.SymbolFlags = G->getTargetFlags();
    Offset = G->getOffset();
  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
    AM.CP = CP->getConstVal();
    AM.Align = CP->getAlignment();
    AM.SymbolFlags = CP->getTargetFlags();
    Offset = CP->getOffset();
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
    AM.ES = S->getSymbol();
    AM.SymbolFlags = S->getTargetFlags();
  } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
    AM.MCSym = S->getMCSymbol();
  } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
    AM.JT = J->getIndex();
    AM.SymbolFlags = J->getTargetFlags();
  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
    AM.BlockAddr = BA->getBlockAddress();
    AM.SymbolFlags = BA->getTargetFlags();
    Offset = BA->getOffset();
  } else
    llvm_unreachable("Unhandled symbol reference node.");

  if (foldOffsetIntoAddress(Offset, AM)) {
    AM = Backup;
    return true;
  }

  if (IsRIPRel)
    AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));

  // Commit the changes now that we know this fold is safe.
  return false;
}
/// Add the specified node to the specified addressing mode, returning true if
/// it cannot be done. This just pattern matches for the addressing mode.
bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (matchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
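  // For example, "lea (,%rcx,2), %rax" requires a SIB byte plus a mandatory
  // 32-bit displacement because it has no base register, whereas the
  // equivalent "lea (%rcx,%rcx), %rax" drops the displacement, saving four
  // bytes.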
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  switch (TM.getCodeModel()) {
  default: break;
  case CodeModel::Small:
  case CodeModel::Kernel:
    if (Subtarget->is64Bit() &&
        AM.Scale == 1 &&
        AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr &&
        AM.SymbolFlags == X86II::MO_NO_FLAG &&
        AM.hasSymbolicDisplacement())
      AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
    break;
  }

  return false;
}
bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
                               unsigned Depth) {
  // Add an artificial use to this node so that we can keep track of
  // it if it gets CSE'd with a different node.
  HandleSDNode Handle(N);

  X86ISelAddressMode Backup = AM;
  if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
      !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
    return false;
  AM = Backup;

  // Try again after commuting the operands.
  if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM,
                               Depth+1) &&
      !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
    return false;
  AM = Backup;

  // If we couldn't fold both operands into the address at the same time,
  // see if we can just put each operand into a register and fold at least
  // the add.
  if (AM.BaseType == X86ISelAddressMode::RegBase &&
      !AM.Base_Reg.getNode() &&
      !AM.IndexReg.getNode()) {
    N = Handle.getValue();
    AM.Base_Reg = N.getOperand(0);
    AM.IndexReg = N.getOperand(1);
    return false;
  }
  N = Handle.getValue();
  return true;
}
// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// function is used.
static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N->getNodeId() == -1 ||
      (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
       SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
    DAG.RepositionNode(Pos->getIterator(), N.getNode());
    // Mark Node as invalid for pruning as after this it may be a successor to
    // a selected node but otherwise be in the same position of Pos.
    // Conservatively mark it with the same -abs(Id) to assure node id
    // invariant is preserved.
    N->setNodeId(Pos->getNodeId());
    SelectionDAGISel::InvalidateNodeId(N.getNode());
  }
}
// Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
// safe. This allows us to convert the shift and AND into an h-register
// extract and a scaled index. Returns false if the simplification is
// performed.
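// For example, with C1 == 2: (X >> 6) & 0x3fc becomes ((X >> 8) & 0xff) << 2,
// where (X >> 8) & 0xff can be selected as an h-register extract (e.g.
// "movzbl %ah, %ecx" when X lives in %eax) and the final shift by 2 folds
// into a scale of 4 in the addressing mode.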
static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, DL, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, Eight);
  insertDAGNode(DAG, N, Srl);
  insertDAGNode(DAG, N, NewMask);
  insertDAGNode(DAG, N, And);
  insertDAGNode(DAG, N, ShlCount);
  insertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  DAG.RemoveDeadNode(N.getNode());
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}
// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
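// For example, (X << 2) & 0xFC becomes (X & 0x3F) << 2: the AND now happens
// before the shift, so the shift by 2 can fold into a scale of 4 in the
// addressing mode instead of needing a separate shl instruction.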
static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        X86ISelAddressMode &AM) {
  SDValue Shift = N.getOperand(0);

  // Use a signed mask so that shifting right will insert sign bits. These
  // bits will be removed when we shift the result left so it doesn't matter
  // what we use. This might allow a smaller immediate encoding.
  int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();

  // If we have an any_extend feeding the AND, look through it to see if there
  // is a shift behind it. But only if the AND doesn't use the extended bits.
  // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
  bool FoundAnyExtend = false;
  if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
      Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
      isUInt<32>(Mask)) {
    FoundAnyExtend = true;
    Shift = Shift.getOperand(0);
  }

  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  SDValue X = Shift.getOperand(0);

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  if (FoundAnyExtend) {
    SDValue NewX = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X);
    insertDAGNode(DAG, N, NewX);
    X = NewX;
  }

  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd,
                                 Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, NewMask);
  insertDAGNode(DAG, N, NewAnd);
  insertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);
  DAG.RemoveDeadNode(N.getNode());

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}
// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %rcx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = countLeadingZeros(Mask);
  unsigned MaskTZ = countTrailingZeros(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that mask is a continuous run of bits.
  if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
  if (MaskLZ < ScaleDown)
    return true;
  MaskLZ -= ScaleDown;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
                          X.getOperand(0).getSimpleValueType().getSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits =
      APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
  KnownBits Known = DAG.computeKnownBits(X);
  if (MaskedHighBits != Known.Zero) return true;
  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  MVT VT = N.getSimpleValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
    insertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  SDLoc DL(N);
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, NewSRLAmt);
  insertDAGNode(DAG, N, NewSRL);
  insertDAGNode(DAG, N, NewSHLAmt);
  insertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);
  DAG.RemoveDeadNode(N.getNode());

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}
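// Illustrative walkthrough (added commentary, assuming a 64-bit mask): for
// N = (and (srl x, 5), 0x78), Mask = 0x78 has MaskTZ = 3 and a contiguous
// run of ones, so AMShiftAmt = 3. The node is rebuilt as
// (shl (srl x, 5 + 3), 3); the SHL becomes AM.Scale = 1 << 3 = 8 and
// AM.IndexReg = (srl x, 8), provided the masked-out high bits of x are
// already known to be zero.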
// Transform "(X >> SHIFT) & (MASK << C1)" to
// "((X >> (SHIFT + C1)) & (MASK)) << C1". Everything before the SHL will be
// matched to a BEXTR later. Returns false if the simplification is performed.
static bool foldMaskedShiftToBEXTR(SelectionDAG &DAG, SDValue N,
                                   uint64_t Mask,
                                   SDValue Shift, SDValue X,
                                   X86ISelAddressMode &AM,
                                   const X86Subtarget &Subtarget) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse() || !N.hasOneUse())
    return true;

  // Only do this if BEXTR will be matched by matchBEXTRFromAndImm.
  if (!Subtarget.hasTBM() &&
      !(Subtarget.hasBMI() && Subtarget.hasFastBEXTR()))
    return true;

  // We need to ensure that mask is a continuous run of bits.
  if (!isShiftedMask_64(Mask)) return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = countTrailingZeros(Mask);

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewMask = DAG.getConstant(Mask >> AMShiftAmt, DL, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, NewSRL, NewMask);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewAnd, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, NewSRLAmt);
  insertDAGNode(DAG, N, NewSRL);
  insertDAGNode(DAG, N, NewMask);
  insertDAGNode(DAG, N, NewAnd);
  insertDAGNode(DAG, N, NewSHLAmt);
  insertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);
  DAG.RemoveDeadNode(N.getNode());

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}
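// Illustrative walkthrough (added commentary): for
// N = (and (srl x, 4), 0x3C), Mask = 0x3C is a shifted mask with
// AMShiftAmt = 2, so the node becomes (shl (and (srl x, 6), 0xF), 2).
// The inner srl+and is later matched to BEXTR, and the outer SHL becomes
// AM.Scale = 4.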
bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  SDLoc dl(N);
  LLVM_DEBUG({
    dbgs() << "MatchAddress: ";
    AM.dump(CurDAG);
  });
  // Limit recursion.
  if (Depth > 5)
    return matchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!(AM.ES || AM.MCSym) && AM.JT != -1)
      return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }
  switch (N.getOpcode()) {
  default: break;
  case ISD::LOCAL_RECOVER: {
    if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
      if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
        // Use the symbol and don't prefix it.
        AM.MCSym = ESNode->getMCSymbol();
        return false;
      }
    break;
  }
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!foldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!matchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;
  case ISD::SHL:
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
      break;

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getOperand(0);
          ConstantSDNode *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!foldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;
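  // Illustrative walkthrough for the ISD::SHL case above (added commentary):
  // (shl x, 2) is matched as IndexReg = x with Scale = 4, i.e. the (,x,4)
  // form; for (shl (add x, 20), 2) the constant is folded first, giving
  // IndexReg = x, Scale = 4, Disp = 80.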
  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
           "Unexpected value size!");

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    LLVM_FALLTHROUGH;
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr) {
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getOperand(1))) {
            Reg = MulVal.getOperand(0);
            ConstantSDNode *AddVal =
                cast<ConstantSDNode>(MulVal.getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (foldOffsetIntoAddress(Disp, AM))
              Reg = N.getOperand(0);
          } else {
            Reg = N.getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;
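  // Illustrative walkthrough for the multiply cases above (added
  // commentary): x*9 is matched as Base_Reg = IndexReg = x with Scale = 8,
  // which later emits lea (%rax,%rax,8), i.e. x + x*8.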
  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and
    // the index field with the index field unused, use -B as the index.
    // This is a win if a has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction, however
    // it costs an additional mov if the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
      N = Handle.getValue();
      AM = Backup;
      break;
    }
    N = Handle.getValue();
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = N.getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    // Negation will be emitted later to avoid creating dangling nodes if this
    // was an unprofitable LEA.
    AM.IndexReg = RHS;
    AM.NegateIndex = true;
    AM.Scale = 1;
    return false;
  }
  case ISD::ADD:
    if (!matchAdd(N, AM, Depth))
      return false;
    break;
  case ISD::OR:
    // We want to look through a transform in InstCombine and DAGCombiner that
    // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
    // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
    // An 'lea' can then be used to match the shift (multiply) and add:
    // and $1, %esi
    // lea (%rsi, %rdi, 8), %rax
    if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
        !matchAdd(N, AM, Depth))
      return false;
    break;
  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
           "Unexpected value size!");

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;

    if (N.getOperand(0).getOpcode() == ISD::SRL) {
      SDValue Shift = N.getOperand(0);
      SDValue X = Shift.getOperand(0);

      uint64_t Mask = N.getConstantOperandVal(1);

      // Try to fold the mask and shift into an extract and scale.
      if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
        return false;

      // Try to fold the mask and shift directly into the scale.
      if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
        return false;

      // Try to fold the mask and shift into BEXTR and scale.
      if (!foldMaskedShiftToBEXTR(*CurDAG, N, Mask, Shift, X, AM, *Subtarget))
        return false;
    }

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!foldMaskedShiftToScaledMask(*CurDAG, N, AM))
      return false;

    break;
  }
  case ISD::ZERO_EXTEND: {
    // Try to widen a zexted shift left to the same size as its use, so we can
    // match the shift as a scale factor.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
      break;
    if (N.getOperand(0).getOpcode() != ISD::SHL || !N.getOperand(0).hasOneUse())
      break;

    // Give up if the shift is not a valid scale factor [1,2,3].
    SDValue Shl = N.getOperand(0);
    auto *ShAmtC = dyn_cast<ConstantSDNode>(Shl.getOperand(1));
    if (!ShAmtC || ShAmtC->getZExtValue() > 3)
      break;

    // The narrow shift must only shift out zero bits (it must be 'nuw').
    // That makes it safe to widen to the destination type.
    APInt HighZeros = APInt::getHighBitsSet(Shl.getValueSizeInBits(),
                                            ShAmtC->getZExtValue());
    if (!CurDAG->MaskedValueIsZero(Shl.getOperand(0), HighZeros))
      break;

    // zext (shl nuw i8 %x, C) to i32 --> shl (zext i8 %x to i32), (zext C)
    MVT VT = N.getSimpleValueType();
    SDLoc DL(N);
    SDValue Zext = CurDAG->getNode(ISD::ZERO_EXTEND, DL, VT, Shl.getOperand(0));
    SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, Shl.getOperand(1));

    // Convert the shift to scale factor.
    AM.Scale = 1 << ShAmtC->getZExtValue();
    AM.IndexReg = Zext;

    insertDAGNode(*CurDAG, N, Zext);
    insertDAGNode(*CurDAG, N, NewShl);
    CurDAG->ReplaceAllUsesWith(N, NewShl);
    CurDAG->RemoveDeadNode(N.getNode());
    return false;
  }
  }

  return matchAddressBase(N, AM);
}
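// Illustrative walkthrough for the ISD::ZERO_EXTEND case above (added
// commentary): for (zext i8 (shl nuw i8 %x, 2) to i32), the narrow shift
// only moves zeros out of the top, so it is rebuilt as
// (shl (zext i8 %x to i32), 2) and consumed as IndexReg = zext %x with
// Scale = 4.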
/// Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (!AM.IndexReg.getNode()) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}
/// Helper for selectVectorAddr. Handles things that can be folded into a
/// gather/scatter address. The index register and scale should have already
/// been handled.
bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) {
  // TODO: Support other operations.
  switch (N.getOpcode()) {
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!foldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }
  case X86ISD::Wrapper:
    if (!matchWrapper(N, AM))
      return false;
    break;
  }

  return matchAddressBase(N, AM);
}
bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                       SDValue &Scale, SDValue &Index,
                                       SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;
  auto *Mgs = cast<X86MaskedGatherScatterSDNode>(Parent);
  AM.IndexReg = Mgs->getIndex();
  AM.Scale = cast<ConstantSDNode>(Mgs->getScale())->getZExtValue();

  unsigned AddrSpace = cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
  // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
  if (AddrSpace == 256)
    AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
  if (AddrSpace == 257)
    AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  if (AddrSpace == 258)
    AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);

  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();

  // Try to match into the base and displacement fields.
  if (matchVectorAddress(N, AM))
    return false;

  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
  return true;
}
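// Note on the address-space mapping above (added commentary): LLVM reserves
// address spaces 256, 257 and 258 on x86 for GS-, FS- and SS-relative
// accesses, so e.g. a gather whose pointer operand lives in addrspace(256)
// is emitted with a %gs segment override.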
/// Returns true if it is able to pattern match an addressing mode.
/// It returns the operands which make up the maximal addressing mode it can
/// match by reference.
///
/// Parent is the parent node of the addr operand that is being matched. It
/// is always a load, store, atomic node, or null. It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;
  if (Parent &&
      // This list of opcodes are all the nodes that have an "addr:$ptr" operand
      // that are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
      Parent->getOpcode() != X86ISD::ENQCMD && // Fixme
      Parent->getOpcode() != X86ISD::ENQCMDS && // Fixme
      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
    unsigned AddrSpace =
        cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
    if (AddrSpace == 258)
      AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
  }

  // Save the DL and VT before calling matchAddress, it can invalidate N.
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();

  if (matchAddress(N, AM))
    return false;

  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
  return true;
}
// We can only fold a load if all nodes between it and the root node have a
// single use. If there are additional uses, we could end up duplicating the
// load.
static bool hasSingleUsesFromRoot(SDNode *Root, SDNode *User) {
  while (User != Root) {
    if (!User->hasOneUse())
      return false;
    User = *User->use_begin();
  }

  return true;
}
/// Match a scalar SSE load. In particular, we want to match a load whose top
/// elements are either undef or zeros. The load flavor is derived from the
/// type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   chain output.
bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root, SDNode *Parent,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (!hasSingleUsesFromRoot(Root, Parent))
    return false;

  // We can allow a full vector load here since narrowing a load is ok unless
  // it's volatile or atomic.
  if (ISD::isNON_EXTLoad(N.getNode())) {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    if (LD->isSimple() &&
        IsProfitableToFold(N, LD, Root) &&
        IsLegalToFold(N, Parent, Root, OptLevel)) {
      PatternNodeWithChain = N;
      return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // We can also match the special zero extended load opcode.
  if (N.getOpcode() == X86ISD::VZEXT_LOAD) {
    PatternNodeWithChain = N;
    if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, Parent, Root, OptLevel)) {
      auto *MI = cast<MemIntrinsicSDNode>(PatternNodeWithChain);
      return selectAddr(MI, MI->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // Need to make sure that the SCALAR_TO_VECTOR and load are both only used
  // once. Otherwise the load might get duplicated and the chain output of the
  // duplicate load will not be observed by all dependencies.
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR && N.getNode()->hasOneUse()) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  return false;
}
bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
  if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    uint64_t ImmVal = CN->getZExtValue();
    if (!isUInt<32>(ImmVal))
      return false;

    Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i64);
    return true;
  }

  // In static codegen with small code model, we can get the address of a label
  // into a register with 'movl'.
  if (N->getOpcode() != X86ISD::Wrapper)
    return false;

  N = N.getOperand(0);

  // At least GNU as does not accept 'movl' for TPOFF relocations.
  // FIXME: We could use 'movl' when we know we are targeting MC.
  if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
    return false;

  if (N->getOpcode() != ISD::TargetGlobalAddress)
    return TM.getCodeModel() == CodeModel::Small;

  Optional<ConstantRange> CR =
      cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
  if (!CR)
    return TM.getCodeModel() == CodeModel::Small;

  return CR->getUnsignedMax().ult(1ull << 32);
}
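// Illustrative note (added commentary): a global annotated with
// !absolute_symbol !{i64 0, i64 4294967296} reports an absolute-symbol
// range whose unsigned maximum is below 1ull << 32, so the check above
// permits materializing its address with a 32-bit 'movl' even outside the
// small code model.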
bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
                                         SDValue &Scale, SDValue &Index,
                                         SDValue &Disp, SDValue &Segment) {
  // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
  SDLoc DL(N);

  if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
    return false;

  RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
  if (RN && RN->getReg() == 0)
    Base = CurDAG->getRegister(0, MVT::i64);
  else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
    // Base could already be %rip, particularly in the x32 ABI.
    SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
                                                     MVT::i64), 0);
    Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
                                         Base);
  }

  RN = dyn_cast<RegisterSDNode>(Index);
  if (RN && RN->getReg() == 0)
    Index = CurDAG->getRegister(0, MVT::i64);
  else {
    assert(Index.getValueType() == MVT::i32 &&
           "Expect to be extending 32-bit registers for use in LEA");
    SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
                                                     MVT::i64), 0);
    Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
                                          Index);
  }

  return true;
}
/// Calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Save the DL and VT before calling matchAddress, it can invalidate N.
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (matchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode())
    Complexity = 1;
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, always use LEA to materialize RIP-relative addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  // Heuristic: try harder to form an LEA from ADD if the operands set flags.
  // Unlike ADD, LEA does not affect flags, so we will be less likely to require
  // duplicating flag-producing instructions later in the pipeline.
  if (N.getOpcode() == ISD::ADD) {
    auto isMathWithFlags = [](SDValue V) {
      switch (V.getOpcode()) {
      case X86ISD::ADD:
      case X86ISD::SUB:
      case X86ISD::ADC:
      case X86ISD::SBB:
      /* TODO: These opcodes can be added safely, but we may want to justify
               their inclusion for different reasons (better for reg-alloc).
      case X86ISD::SMUL:
      case X86ISD::UMUL:
      case X86ISD::OR:
      case X86ISD::XOR:
      case X86ISD::AND:
      */
        // Value 1 is the flag output of the node - verify it's not dead.
        return !SDValue(V.getNode(), 1).use_empty();
      default:
        return false;
      }
    };
    // TODO: This could be an 'or' rather than 'and' to make the transform more
    //       likely to happen. We might want to factor in whether there's a
    //       load folding opportunity for the math op that disappears with LEA.
    if (isMathWithFlags(N.getOperand(0)) && isMathWithFlags(N.getOperand(1)))
      Complexity++;
  }

  if (AM.Disp)
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
  return true;
}
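// Illustrative note on the scoring above (added commentary): a plain
// (add %reg, %reg2) scores Complexity = 2 (base + index) and is rejected in
// favor of ADD, while base + scaled index + displacement, e.g.
// lea 16(%rdi,%rsi,4), scores 3 and is kept as an LEA.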
/// This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.SymbolFlags = GA->getTargetFlags();

  MVT VT = N.getSimpleValueType();
  if (VT == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  }

  getAddressOperands(AM, SDLoc(N), VT, Base, Scale, Index, Disp, Segment);
  return true;
}
bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
  if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
    Op = CurDAG->getTargetConstant(CN->getAPIntValue(), SDLoc(CN),
                                   N.getValueType());
    return true;
  }

  // Keep track of the original value type and whether this value was
  // truncated. If we see a truncation from pointer type to VT that truncates
  // bits that are known to be zero, we can use a narrow reference.
  EVT VT = N.getValueType();
  bool WasTruncated = false;
  if (N.getOpcode() == ISD::TRUNCATE) {
    WasTruncated = true;
    N = N.getOperand(0);
  }

  if (N.getOpcode() != X86ISD::Wrapper)
    return false;

  // We can only use non-GlobalValues as immediates if they were not truncated,
  // as we do not have any range information. If we have a GlobalValue and the
  // address was not truncated, we can select it as an operand directly.
  unsigned Opc = N.getOperand(0)->getOpcode();
  if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
    Op = N.getOperand(0);
    // We can only select the operand directly if we didn't have to look past a
    // truncate.
    return !WasTruncated;
  }

  // Check that the global's range fits into VT.
  auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
  Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
  if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
    return false;

  // Okay, we can use a narrow reference.
  Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
                                      GA->getOffset(), GA->getTargetFlags());
  return true;
}
bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  assert(Root && P && "Unknown root/parent nodes");
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, Root) ||
      !IsLegalToFold(N, P, Root, OptLevel))
    return false;

  return selectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}
/// Return an SDNode that returns the value of the global base register.
/// Output instructions required to initialize the global base register,
/// if necessary.
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  auto &DL = MF->getDataLayout();
  return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
}
bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
  if (N->getOpcode() == ISD::TRUNCATE)
    N = N->getOperand(0).getNode();
  if (N->getOpcode() != X86ISD::Wrapper)
    return false;

  auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
  if (!GA)
    return false;

  Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
  return CR && CR->getSignedMin().sge(-1ull << Width) &&
         CR->getSignedMax().slt(1ull << Width);
}
static X86::CondCode getCondFromNode(SDNode *N) {
  assert(N->isMachineOpcode() && "Unexpected node");
  X86::CondCode CC = X86::COND_INVALID;
  unsigned Opc = N->getMachineOpcode();
  if (Opc == X86::JCC_1)
    CC = static_cast<X86::CondCode>(N->getConstantOperandVal(1));
  else if (Opc == X86::SETCCr)
    CC = static_cast<X86::CondCode>(N->getConstantOperandVal(0));
  else if (Opc == X86::SETCCm)
    CC = static_cast<X86::CondCode>(N->getConstantOperandVal(5));
  else if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr ||
           Opc == X86::CMOV64rr)
    CC = static_cast<X86::CondCode>(N->getConstantOperandVal(2));
  else if (Opc == X86::CMOV16rm || Opc == X86::CMOV32rm ||
           Opc == X86::CMOV64rm)
    CC = static_cast<X86::CondCode>(N->getConstantOperandVal(6));

  return CC;
}
/// Test whether the given X86ISD::CMP node has any users that use a flag
/// other than ZF.
bool X86DAGToDAGISel::onlyUsesZeroFlag(SDValue Flags) const {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
       UI != UE; ++UI) {
    // Only check things that use the flags.
    if (UI.getUse().getResNo() != Flags.getResNo())
      continue;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (UI->getOpcode() != ISD::CopyToReg ||
        cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the condition code of the user.
      X86::CondCode CC = getCondFromNode(*FlagUI);

      switch (CC) {
      // Comparisons which only use the zero flag.
      case X86::COND_E: case X86::COND_NE:
        continue;
      // Anything else: assume conservatively.
      default:
        return false;
      }
    }
  }
  return true;
}
/// Test whether the given X86ISD::CMP node has any uses which require the SF
/// flag to be accurate.
bool X86DAGToDAGISel::hasNoSignFlagUses(SDValue Flags) const {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
       UI != UE; ++UI) {
    // Only check things that use the flags.
    if (UI.getUse().getResNo() != Flags.getResNo())
      continue;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (UI->getOpcode() != ISD::CopyToReg ||
        cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the condition code of the user.
      X86::CondCode CC = getCondFromNode(*FlagUI);

      switch (CC) {
      // Comparisons which don't examine the SF flag.
      case X86::COND_A: case X86::COND_AE:
      case X86::COND_B: case X86::COND_BE:
      case X86::COND_E: case X86::COND_NE:
      case X86::COND_O: case X86::COND_NO:
      case X86::COND_P: case X86::COND_NP:
        continue;
      // Anything else: assume conservatively.
      default:
        return false;
      }
    }
  }
  return true;
}
static bool mayUseCarryFlag(X86::CondCode CC) {
  switch (CC) {
  // Comparisons which don't examine the CF flag.
  case X86::COND_O: case X86::COND_NO:
  case X86::COND_E: case X86::COND_NE:
  case X86::COND_S: case X86::COND_NS:
  case X86::COND_P: case X86::COND_NP:
  case X86::COND_L: case X86::COND_GE:
  case X86::COND_G: case X86::COND_LE:
    return false;
  // Anything else: assume conservatively.
  default:
    return true;
  }
}
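// Added commentary: the codes listed above are decoded from ZF, SF, OF and
// PF only; the conservative default catches the actual CF readers, i.e.
// COND_A/COND_AE/COND_B/COND_BE and carry-consuming users such as
// ADC/SBB-style nodes.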
/// Test whether the given node which sets flags has any uses which require the
/// CF flag to be accurate.
bool X86DAGToDAGISel::hasNoCarryFlagUses(SDValue Flags) const {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
       UI != UE; ++UI) {
    // Only check things that use the flags.
    if (UI.getUse().getResNo() != Flags.getResNo())
      continue;

    unsigned UIOpc = UI->getOpcode();

    if (UIOpc == ISD::CopyToReg) {
      // Only examine CopyToReg uses that copy to EFLAGS.
      if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
        return false;
      // Examine each user of the CopyToReg use.
      for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
           FlagUI != FlagUE; ++FlagUI) {
        // Only examine the Flag result.
        if (FlagUI.getUse().getResNo() != 1)
          continue;
        // Anything unusual: assume conservatively.
        if (!FlagUI->isMachineOpcode())
          return false;
        // Examine the condition code of the user.
        X86::CondCode CC = getCondFromNode(*FlagUI);

        if (mayUseCarryFlag(CC))
          return false;
      }

      // This CopyToReg is ok. Move on to the next user.
      continue;
    }

    // This might be an unselected node. So look for the pre-isel opcodes that
    // use flags.
    unsigned CCOpNo;
    switch (UIOpc) {
    default:
      // Something unusual. Be conservative.
      return false;
    case X86ISD::SETCC:       CCOpNo = 0; break;
    case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
    case X86ISD::CMOV:        CCOpNo = 2; break;
    case X86ISD::BRCOND:      CCOpNo = 2; break;
    }

    X86::CondCode CC = (X86::CondCode)UI->getConstantOperandVal(CCOpNo);
    if (mayUseCarryFlag(CC))
      return false;
  }
  return true;
}
/// Check whether or not the chain ending in StoreNode is suitable for doing
/// the {load; op; store} to modify transformation.
static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
                                        SDValue StoredVal, SelectionDAG *CurDAG,
                                        unsigned LoadOpNo,
                                        LoadSDNode *&LoadNode,
                                        SDValue &InputChain) {
  // Is the stored value result 0 of the operation?
  if (StoredVal.getResNo() != 0) return false;

  // Are there other uses of the operation other than the store?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;

  SDValue Load = StoredVal->getOperand(LoadOpNo);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode())) return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);

  // Is store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;

  bool FoundLoad = false;
  SmallVector<SDValue, 4> ChainOps;
  SmallVector<const SDNode *, 4> LoopWorklist;
  SmallPtrSet<const SDNode *, 16> Visited;
  const unsigned int Max = 1024;
  // Visualization of Load-Op-Store fusion:
  // -------------------------
  // Legend:
  //    *-lines = Chain operand dependencies.
  //    |-lines = Normal operand dependencies.
  //    Dependencies flow down and right. n-suffix references multiple nodes.
  //
  // [ASCII diagram elided: it depicts the chain inputs Xn, the op operands
  //  Yn, and the chain successors Zn, with the load LD, operation OP and
  //  store ST collapsing into a single node:
  //      * * \ |   =>   A--LD_OP_ST ]
  //
  // This merge induced dependences from: #1: Xn -> LD, OP, Zn
  //
  // Ensure the transform is safe by checking for the dual
  // dependencies to make sure we do not induce a loop.
  //
  // As LD is a predecessor to both OP and ST we can do this by checking:
  //  a). if LD is a predecessor to a member of Xn or Yn.
  //  b). if a Zn is a predecessor to ST.
  //
  // However, (b) can only occur through being a chain predecessor to
  // ST, which is the same as Zn being a member or predecessor of Xn,
  // which is a subset of LD being a predecessor of Xn. So it's
  // subsumed by check (a).
  SDValue Chain = StoreNode->getChain();

  // Gather X elements in ChainOps.
  if (Chain == Load.getValue(1)) {
    FoundLoad = true;
    ChainOps.push_back(Load.getOperand(0));
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
      SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        FoundLoad = true;
        // Drop Load, but keep its chain. No cycle check necessary.
        ChainOps.push_back(Load.getOperand(0));
        continue;
      }
      LoopWorklist.push_back(Op.getNode());
      ChainOps.push_back(Op);
    }
  }

  if (!FoundLoad)
    return false;

  // Worklist is currently Xn. Add Yn to worklist.
  for (SDValue Op : StoredVal->ops())
    if (Op.getNode() != LoadNode)
      LoopWorklist.push_back(Op.getNode());

  // Check (a) if Load is a predecessor to Xn + Yn.
  if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
                                   true))
    return false;

  InputChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps);
  return true;
}
// Change a chain of {load; op; store} of the same value into a simple op
// through memory of that value, if the uses of the modified value and its
// address are suitable.
//
// The tablegen pattern memory operand pattern is currently not able to match
// the case where the EFLAGS on the original operation are used.
//
// To move this to tablegen, we'll need to improve tablegen to allow flags to
// be transferred from a node in the pattern to the result node, probably with
// a new keyword. For example, we have this
//   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
//                  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
//                   (implicit EFLAGS)]>;
// but maybe need something like this
//   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
//                  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
//                   (transferrable EFLAGS)]>;
//
// Until then, we manually fold these and instruction select the operation
// here.
bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
  StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
  SDValue StoredVal = StoreNode->getOperand(1);
  unsigned Opc = StoredVal->getOpcode();

  // Before we try to select anything, make sure this is memory operand size
  // and opcode we can handle. Note that this must match the code below that
  // actually lowers the opcodes.
  EVT MemVT = StoreNode->getMemoryVT();
  if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
      MemVT != MVT::i8)
    return false;

  bool IsCommutable = false;
  bool IsNegate = false;
  switch (Opc) {
  default:
    return false;
  case X86ISD::SUB:
    IsNegate = isNullConstant(StoredVal.getOperand(0));
    break;
  case X86ISD::SBB:
    break;
  case X86ISD::ADD:
  case X86ISD::ADC:
  case X86ISD::AND:
  case X86ISD::OR:
  case X86ISD::XOR:
    IsCommutable = true;
    break;
  }

  unsigned LoadOpNo = IsNegate ? 1 : 0;
  LoadSDNode *LoadNode = nullptr;
  SDValue InputChain;
  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
                                   LoadNode, InputChain)) {
    if (!IsCommutable)
      return false;

    // This operation is commutable, try the other operand.
    LoadOpNo = 1;
    if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
                                     LoadNode, InputChain))
      return false;
  }

  SDValue Base, Scale, Index, Disp, Segment;
  if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
                  Segment))
    return false;

  auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
                          unsigned Opc8) {
    switch (MemVT.getSimpleVT().SimpleTy) {
    case MVT::i64:
      return Opc64;
    case MVT::i32:
      return Opc32;
    case MVT::i16:
      return Opc16;
    case MVT::i8:
      return Opc8;
    default:
      llvm_unreachable("Invalid size!");
    }
  };

  MachineSDNode *Result;
  switch (Opc) {
  case X86ISD::SUB:
    // Handle negate.
    if (IsNegate) {
      unsigned NewOpc = SelectOpcode(X86::NEG64m, X86::NEG32m, X86::NEG16m,
                                     X86::NEG8m);
      const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
      Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
                                      MVT::Other, Ops);
      break;
    }
    LLVM_FALLTHROUGH;
  case X86ISD::ADD:
    // Try to match inc/dec.
    if (!Subtarget->slowIncDec() || OptForSize) {
      bool IsOne = isOneConstant(StoredVal.getOperand(1));
      bool IsNegOne = isAllOnesConstant(StoredVal.getOperand(1));
      // ADD/SUB with 1/-1 and carry flag isn't used can use inc/dec.
      if ((IsOne || IsNegOne) && hasNoCarryFlagUses(StoredVal.getValue(1))) {
        unsigned NewOpc =
            ((Opc == X86ISD::ADD) == IsOne)
                ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m,
                               X86::INC8m)
                : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m,
                               X86::DEC8m);
        const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
        Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
                                        MVT::Other, Ops);
        break;
      }
    }
    LLVM_FALLTHROUGH;
  case X86ISD::ADC:
  case X86ISD::SBB:
  case X86ISD::AND:
  case X86ISD::OR:
  case X86ISD::XOR: {
    auto SelectRegOpcode = [SelectOpcode](unsigned Opc) {
      switch (Opc) {
      case X86ISD::ADD:
        return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr,
                            X86::ADD8mr);
      case X86ISD::ADC:
        return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr,
                            X86::ADC8mr);
      case X86ISD::SUB:
        return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr,
                            X86::SUB8mr);
      case X86ISD::SBB:
        return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr,
                            X86::SBB8mr);
      case X86ISD::AND:
        return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr,
                            X86::AND8mr);
      case X86ISD::OR:
        return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr);
      case X86ISD::XOR:
        return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr,
                            X86::XOR8mr);
      default:
        llvm_unreachable("Invalid opcode!");
      }
    };
    auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) {
      switch (Opc) {
      case X86ISD::ADD:
        return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0);
      case X86ISD::ADC:
        return SelectOpcode(X86::ADC64mi8, X86::ADC32mi8, X86::ADC16mi8, 0);
      case X86ISD::SUB:
        return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0);
      case X86ISD::SBB:
        return SelectOpcode(X86::SBB64mi8, X86::SBB32mi8, X86::SBB16mi8, 0);
      case X86ISD::AND:
        return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0);
      case X86ISD::OR:
        return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0);
      case X86ISD::XOR:
        return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0);
      default:
        llvm_unreachable("Invalid opcode!");
      }
    };
    auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
      switch (Opc) {
      case X86ISD::ADD:
        return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi,
                            X86::ADD8mi);
      case X86ISD::ADC:
        return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi,
                            X86::ADC8mi);
      case X86ISD::SUB:
        return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi,
                            X86::SUB8mi);
      case X86ISD::SBB:
        return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi,
                            X86::SBB8mi);
      case X86ISD::AND:
        return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi,
                            X86::AND8mi);
      case X86ISD::OR:
        return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi,
                            X86::OR8mi);
      case X86ISD::XOR:
        return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi,
                            X86::XOR8mi);
      default:
        llvm_unreachable("Invalid opcode!");
      }
    };

    unsigned NewOpc = SelectRegOpcode(Opc);
    SDValue Operand = StoredVal->getOperand(1-LoadOpNo);

    // See if the operand is a constant that we can fold into an immediate
    // operand.
    if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
      int64_t OperandV = OperandC->getSExtValue();

      // Check if we can shrink the operand enough to fit in an immediate (or
      // fit into a smaller immediate) by negating it and switching the
      // operation.
      if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
          ((MemVT != MVT::i8 && !isInt<8>(OperandV) && isInt<8>(-OperandV)) ||
           (MemVT == MVT::i64 && !isInt<32>(OperandV) &&
            isInt<32>(-OperandV))) &&
          hasNoCarryFlagUses(StoredVal.getValue(1))) {
        OperandV = -OperandV;
        Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
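        // Illustrative example (added commentary): for a 64-bit
        // (add (load [m]), 0x80000000), the +2^31 immediate does not fit a
        // sign-extended 32-bit field, but its negation does, so the node is
        // rewritten as (sub (load [m]), -0x80000000) and selected as a
        // SUB64mi32 form below.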
      }

      // First try to fit this into an Imm8 operand. If it doesn't fit, then try
      // the larger immediate operand.
      if (MemVT != MVT::i8 && isInt<8>(OperandV)) {
        Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
        NewOpc = SelectImm8Opcode(Opc);
      } else if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
        Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
        NewOpc = SelectImmOpcode(Opc);
      }
    }

    if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) {
      SDValue CopyTo =
          CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS,
                               StoredVal.getOperand(2), SDValue());

      const SDValue Ops[] = {Base, Scale, Index, Disp,
                             Segment, Operand, CopyTo, CopyTo.getValue(1)};
      Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
                                      Ops);
    } else {
      const SDValue Ops[] = {Base, Scale, Index, Disp,
                             Segment, Operand, InputChain};
      Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
                                      Ops);
    }
    break;
  }
  default:
    llvm_unreachable("Invalid opcode!");
  }

  MachineMemOperand *MemOps[] = {StoreNode->getMemOperand(),
                                 LoadNode->getMemOperand()};
  CurDAG->setNodeMemRefs(Result, MemOps);

  // Update Load Chain uses as well.
  ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1));
  ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
  ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
  CurDAG->RemoveDeadNode(Node);

  return true;
}
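// Illustrative walkthrough (added commentary): a chain
// (store (add (load [m]), 5), [m]) whose loaded value has no other uses is
// folded here into a single read-modify-write instruction such as
// addl $5, (%rax), with the old load and store chain uses rewired to the
// new node's chain result.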
// See if this is an X & Mask that we can match to BEXTR/BZHI.
// Where Mask is one of the following patterns:
//   a) x &  (1 << nbits) - 1
//   b) x & ~(-1 << nbits)
//   c) x &  (-1 >> (32 - y))
//   d) x << (32 - y) >> (32 - y)
bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
  assert(
      (Node->getOpcode() == ISD::AND || Node->getOpcode() == ISD::SRL) &&
      "Should be either an and-mask, or right-shift after clearing high bits.");

  // BEXTR is BMI instruction, BZHI is BMI2 instruction. We need at least one.
  if (!Subtarget->hasBMI() && !Subtarget->hasBMI2())
    return false;

  MVT NVT = Node->getSimpleValueType(0);

  // Only supported for 32 and 64 bits.
  if (NVT != MVT::i32 && NVT != MVT::i64)
    return false;

  SDValue NBits;

  // If we have BMI2's BZHI, we are ok with multi-use patterns.
  // Else, if we only have BMI1's BEXTR, we require one-use.
  const bool CanHaveExtraUses = Subtarget->hasBMI2();
  auto checkUses = [CanHaveExtraUses](SDValue Op, unsigned NUses) {
    return CanHaveExtraUses ||
           Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
  };
  auto checkOneUse = [checkUses](SDValue Op) { return checkUses(Op, 1); };
  auto checkTwoUse = [checkUses](SDValue Op) { return checkUses(Op, 2); };

  auto peekThroughOneUseTruncation = [checkOneUse](SDValue V) {
    if (V->getOpcode() == ISD::TRUNCATE && checkOneUse(V)) {
      assert(V.getSimpleValueType() == MVT::i32 &&
             V.getOperand(0).getSimpleValueType() == MVT::i64 &&
             "Expected i64 -> i32 truncation");
      V = V.getOperand(0);
    }
    return V;
  };

  // a) x & ((1 << nbits) + (-1))
  auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation,
                        &NBits](SDValue Mask) -> bool {
    // Match `add`. Must only have one use!
    if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask))
      return false;
    // We should be adding all-ones constant (i.e. subtracting one.)
    if (!isAllOnesConstant(Mask->getOperand(1)))
      return false;
    // Match `1 << nbits`. Might be truncated. Must only have one use!
    SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
    if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
      return false;
    if (!isOneConstant(M0->getOperand(0)))
      return false;
    NBits = M0->getOperand(1);
    return true;
  };

  auto isAllOnes = [this, peekThroughOneUseTruncation, NVT](SDValue V) {
    V = peekThroughOneUseTruncation(V);
    return CurDAG->MaskedValueIsAllOnes(
        V, APInt::getLowBitsSet(V.getSimpleValueType().getSizeInBits(),
                                NVT.getSizeInBits()));
  };

  // b) x & ~(-1 << nbits)
  auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation,
                        &NBits](SDValue Mask) -> bool {
    // Match `~()`. Must only have one use!
    if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask))
      return false;
    // The -1 only has to be all-ones for the final Node's NVT.
    if (!isAllOnes(Mask->getOperand(1)))
      return false;
    // Match `-1 << nbits`. Might be truncated. Must only have one use!
    SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
    if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
      return false;
    // The -1 only has to be all-ones for the final Node's NVT.
    if (!isAllOnes(M0->getOperand(0)))
      return false;
    NBits = M0->getOperand(1);
    return true;
  };

  // Match potentially-truncated (bitwidth - y)
  auto matchShiftAmt = [checkOneUse, &NBits](SDValue ShiftAmt,
                                             unsigned Bitwidth) {
    // Skip over a truncate of the shift amount.
    if (ShiftAmt.getOpcode() == ISD::TRUNCATE) {
      ShiftAmt = ShiftAmt.getOperand(0);
      // The trunc should have been the only user of the real shift amount.
      if (!checkOneUse(ShiftAmt))
        return false;
    }
    // Match the shift amount as: (bitwidth - y). It should go away, too.
    if (ShiftAmt.getOpcode() != ISD::SUB)
      return false;
    auto V0 = dyn_cast<ConstantSDNode>(ShiftAmt.getOperand(0));
    if (!V0 || V0->getZExtValue() != Bitwidth)
      return false;
    NBits = ShiftAmt.getOperand(1);
    return true;
  };

  // c) x & (-1 >> (32 - y))
  auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation,
                        matchShiftAmt](SDValue Mask) -> bool {
    // The mask itself may be truncated.
    Mask = peekThroughOneUseTruncation(Mask);
    unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
    // Match `l>>`. Must only have one use!
    if (Mask.getOpcode() != ISD::SRL || !checkOneUse(Mask))
      return false;
    // We should be shifting truly all-ones constant.
    if (!isAllOnesConstant(Mask.getOperand(0)))
      return false;
    SDValue M1 = Mask.getOperand(1);
    // The shift amount should not be used externally.
    if (!checkOneUse(M1))
      return false;
    return matchShiftAmt(M1, Bitwidth);
  };

  SDValue X;

  // d) x << (32 - y) >> (32 - y)
  auto matchPatternD = [checkOneUse, checkTwoUse, matchShiftAmt,
                        &X](SDNode *Node) -> bool {
    if (Node->getOpcode() != ISD::SRL)
      return false;
    SDValue N0 = Node->getOperand(0);
    if (N0->getOpcode() != ISD::SHL || !checkOneUse(N0))
      return false;
    unsigned Bitwidth = N0.getSimpleValueType().getSizeInBits();
    SDValue N1 = Node->getOperand(1);
    SDValue N01 = N0->getOperand(1);
    // Both of the shifts must be by the exact same value.
    // There should not be any uses of the shift amount outside of the pattern.
    if (N1 != N01 || !checkTwoUse(N1))
      return false;
    if (!matchShiftAmt(N1, Bitwidth))
      return false;
    X = N0->getOperand(0);
    return true;
  };

  auto matchLowBitMask = [matchPatternA, matchPatternB,
                          matchPatternC](SDValue Mask) -> bool {
    return matchPatternA(Mask) || matchPatternB(Mask) || matchPatternC(Mask);
  };

  if (Node->getOpcode() == ISD::AND) {
    X = Node->getOperand(0);
    SDValue Mask = Node->getOperand(1);

    if (matchLowBitMask(Mask)) {
      // Great.
    } else {
      std::swap(X, Mask);
      if (!matchLowBitMask(Mask))
        return false;
    }
  } else if (!matchPatternD(Node))
    return false;

  SDLoc DL(Node);

  // Truncate the shift amount.
  NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);

  // Insert 8-bit NBits into lowest 8 bits of 32-bit register.
  // All the other bits are undefined, we do not care about them.
  SDValue ImplDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32), 0);
  insertDAGNode(*CurDAG, SDValue(Node, 0), ImplDef);

  SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
  insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
  NBits = SDValue(
      CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::i32, ImplDef,
                             NBits, SRIdxVal), 0);
  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);

  if (Subtarget->hasBMI2()) {
    // Great, just emit the BZHI.
    if (NVT != MVT::i32) {
      // But have to place the bit count into the wide-enough register first.
      NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits);
      insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
    }

    SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, NVT, X, NBits);
    ReplaceNode(Node, Extract.getNode());
    SelectCode(Extract.getNode());
    return true;
  }

  // Else, if we do *NOT* have BMI2, let's find out if the 'X' is
  // *logically* shifted (potentially with one-use trunc inbetween),
  // and the truncation was the only use of the shift,
  // and if so look past one-use truncation.
  {
    SDValue RealX = peekThroughOneUseTruncation(X);
    // FIXME: only if the shift is one-use?
    if (RealX != X && RealX.getOpcode() == ISD::SRL)
      X = RealX;
  }

  MVT XVT = X.getSimpleValueType();

  // Else, emitting BEXTR requires one more step.
  // The 'control' of BEXTR has the pattern of:
  // [15...8 bit][ 7...0 bit] location
  // [ bit count][     shift] name
  // I.e. 0b000000011'00000001 means (x >> 0b1) & 0b11
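  // Illustrative walkthrough (added commentary): to extract the y low bits
  // of x, BEXTR needs control = (y << 8) | 0. Only NBits (y) is known at
  // this point, so the control is formed as (NBits << 8) below, and a shift
  // of x, when one is present, is OR'ed into the low byte afterwards.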
  // Shift NBits left by 8 bits, thus producing 'control'.
  // This makes the low 8 bits to be zero.
  SDValue C8 = CurDAG->getConstant(8, DL, MVT::i8);
  SDValue Control = CurDAG->getNode(ISD::SHL, DL, MVT::i32, NBits, C8);
  insertDAGNode(*CurDAG, SDValue(Node, 0), Control);

  // If the 'X' is *logically* shifted, we can fold that shift into 'control'.
  // FIXME: only if the shift is one-use?
  if (X.getOpcode() == ISD::SRL) {
    SDValue ShiftAmt = X.getOperand(1);
    X = X.getOperand(0);

    assert(ShiftAmt.getValueType() == MVT::i8 &&
           "Expected shift amount to be i8");

    // Now, *zero*-extend the shift amount. The bits 8...15 *must* be zero!
    // We could zext to i16 in some form, but we intentionally don't do that.
    SDValue OrigShiftAmt = ShiftAmt;
    ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShiftAmt);
    insertDAGNode(*CurDAG, OrigShiftAmt, ShiftAmt);

    // And now 'or' these low 8 bits of shift amount into the 'control'.
    Control = CurDAG->getNode(ISD::OR, DL, MVT::i32, Control, ShiftAmt);
    insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
  }

  // But have to place the 'control' into the wide-enough register first.
  if (XVT != MVT::i32) {
    Control = CurDAG->getNode(ISD::ANY_EXTEND, DL, XVT, Control);
    insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
  }

  // And finally, form the BEXTR itself.
  SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, XVT, X, Control);

  // The 'X' was originally truncated. Do that now.
  if (XVT != NVT) {
    insertDAGNode(*CurDAG, SDValue(Node, 0), Extract);
    Extract = CurDAG->getNode(ISD::TRUNCATE, DL, NVT, Extract);
  }

  ReplaceNode(Node, Extract.getNode());
  SelectCode(Extract.getNode());

  return true;
}
// See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
MachineSDNode *X86DAGToDAGISel::matchBEXTRFromAndImm(SDNode *Node) {
  MVT NVT = Node->getSimpleValueType(0);
  SDLoc dl(Node);

  SDValue N0 = Node->getOperand(0);
  SDValue N1 = Node->getOperand(1);

  // If we have TBM we can use an immediate for the control. If we have BMI
  // we should only do this if the BEXTR instruction is implemented well.
  // Otherwise moving the control into a register makes this more costly.
  // TODO: Maybe load folding, greater than 32-bit masks, or a guarantee of LICM
  // hoisting the move immediate would make it worthwhile with a less optimal
  // BEXTR?
  bool PreferBEXTR =
      Subtarget->hasTBM() || (Subtarget->hasBMI() && Subtarget->hasFastBEXTR());
  if (!PreferBEXTR && !Subtarget->hasBMI2())
    return nullptr;

  // Must have a shift right.
  if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
    return nullptr;

  // Shift can't have additional users.
  if (!N0->hasOneUse())
    return nullptr;

  // Only supported for 32 and 64 bits.
  if (NVT != MVT::i32 && NVT != MVT::i64)
    return nullptr;

  // Shift amount and RHS of and must be constant.
  ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(N1);
  ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  if (!MaskCst || !ShiftCst)
    return nullptr;

  // And RHS must be a mask.
  uint64_t Mask = MaskCst->getZExtValue();
  if (!isMask_64(Mask))
    return nullptr;

  uint64_t Shift = ShiftCst->getZExtValue();
  uint64_t MaskSize = countPopulation(Mask);

  // Don't interfere with something that can be handled by extracting AH.
  // TODO: If we are able to fold a load, BEXTR might still be better than AH.
  if (Shift == 8 && MaskSize == 8)
    return nullptr;

  // Make sure we are only using bits that were in the original value, not
  // shifted in.
  if (Shift + MaskSize > NVT.getSizeInBits())
    return nullptr;

  // BZHI, if available, is always fast, unlike BEXTR. But even if we decide
  // that we can't use BEXTR, it is only worthwhile using BZHI if the mask
  // does not fit into 32 bits. Load folding is not a sufficient reason.
  if (!PreferBEXTR && MaskSize <= 32)
    return nullptr;

  SDValue Control;
  unsigned ROpc, MOpc;

  if (!PreferBEXTR) {
    assert(Subtarget->hasBMI2() && "We must have BMI2's BZHI then.");
    // If we can't make use of BEXTR then we can't fuse shift+mask stages.
    // Let's perform the mask first, and apply shift later. Note that we need
    // to widen the mask to account for the fact that we'll apply shift
    // afterwards!
    Control = CurDAG->getTargetConstant(Shift + MaskSize, dl, NVT);
    ROpc = NVT == MVT::i64 ? X86::BZHI64rr : X86::BZHI32rr;
    MOpc = NVT == MVT::i64 ? X86::BZHI64rm : X86::BZHI32rm;
    unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
    Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
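
    // Worked example (illustrative): for (and (srl X, 4), 0xFFFFFFFFF), a
    // 36-bit mask, Shift == 4 and MaskSize == 36, so BZHI keeps the low 40
    // bits and the SHR emitted at the bottom of this function drops the
    // low 4 afterwards.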
  } else {
    // The 'control' of BEXTR has the pattern of:
    // [15...8 bit][ 7...0 bit] location
    // [ bit count][     shift] name
    // I.e. 0b000000011'00000001 means  (x >> 0b1) & 0b11
    Control = CurDAG->getTargetConstant(Shift | (MaskSize << 8), dl, NVT);
    if (Subtarget->hasTBM()) {
      ROpc = NVT == MVT::i64 ? X86::BEXTRI64ri : X86::BEXTRI32ri;
      MOpc = NVT == MVT::i64 ? X86::BEXTRI64mi : X86::BEXTRI32mi;
    } else {
      assert(Subtarget->hasBMI() && "We must have BMI1's BEXTR then.");
      // BMI requires the immediate to be placed in a register.
      ROpc = NVT == MVT::i64 ? X86::BEXTR64rr : X86::BEXTR32rr;
      MOpc = NVT == MVT::i64 ? X86::BEXTR64rm : X86::BEXTR32rm;
      unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
      Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
    }
  }

  MachineSDNode *NewNode;
  SDValue Input = N0->getOperand(0);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (tryFoldLoad(Node, N0.getNode(), Input, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
    SDValue Ops[] = {
        Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Control, Input.getOperand(0)};
    SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
    NewNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
    // Update the chain.
    ReplaceUses(Input.getValue(1), SDValue(NewNode, 2));
    // Record the mem-refs
    CurDAG->setNodeMemRefs(NewNode, {cast<LoadSDNode>(Input)->getMemOperand()});
  } else {
    NewNode = CurDAG->getMachineNode(ROpc, dl, NVT, MVT::i32, Input, Control);
  }

  if (!PreferBEXTR) {
    // We still need to apply the shift.
    SDValue ShAmt = CurDAG->getTargetConstant(Shift, dl, NVT);
    unsigned NewOpc = NVT == MVT::i64 ? X86::SHR64ri : X86::SHR32ri;
    NewNode =
        CurDAG->getMachineNode(NewOpc, dl, NVT, SDValue(NewNode, 0), ShAmt);
  }

  return NewNode;
}
// Emit a PCMPISTR(I/M) instruction.
MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
                                             bool MayFoldLoad, const SDLoc &dl,
                                             MVT VT, SDNode *Node) {
  SDValue N0 = Node->getOperand(0);
  SDValue N1 = Node->getOperand(1);
  SDValue Imm = Node->getOperand(2);
  const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
  Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());

  // Try to fold a load. No need to check alignment.
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (MayFoldLoad && tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
    SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
                      N1.getOperand(0) };
    SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
    MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
    // Update the chain.
    ReplaceUses(N1.getValue(1), SDValue(CNode, 2));
    // Record the mem-refs
    CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
    return CNode;
  }

  SDValue Ops[] = { N0, N1, Imm };
  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
  MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
  return CNode;
}
// Emit a PCMPESTR(I/M) instruction. Also return the Glue result in case we
// need to emit a second instruction after this one. This is needed since we
// have two copyToReg nodes glued before this and we need to continue that
// glue through.
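// Illustrative sketch of that glue chain, as set up by the PCMPESTR case in
// Select() below:
//   CopyToReg(EAX) --glue--> CopyToReg(EDX) --glue--> PCMPESTR
// Each link forwards its Glue result so the implicit register inputs cannot
// be scheduled away from the compare.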
MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
                                             bool MayFoldLoad, const SDLoc &dl,
                                             MVT VT, SDNode *Node,
                                             SDValue &InFlag) {
  SDValue N0 = Node->getOperand(0);
  SDValue N2 = Node->getOperand(2);
  SDValue Imm = Node->getOperand(4);
  const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
  Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());

  // Try to fold a load. No need to check alignment.
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (MayFoldLoad && tryFoldLoad(Node, N2, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
    SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
                      N2.getOperand(0), InFlag };
    SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
    MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
    InFlag = SDValue(CNode, 3);
    // Update the chain.
    ReplaceUses(N2.getValue(1), SDValue(CNode, 2));
    // Record the mem-refs
    CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N2)->getMemOperand()});
    return CNode;
  }

  SDValue Ops[] = { N0, N2, Imm, InFlag };
  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue);
  MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
  InFlag = SDValue(CNode, 2);
  return CNode;
}
bool X86DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
  EVT VT = N->getValueType(0);

  // Only handle scalar shifts.
  if (VT.isVector())
    return false;

  // Narrower shifts only mask to 5 bits in hardware.
  unsigned Size = VT == MVT::i64 ? 64 : 32;
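
  // Worked example (illustrative): hardware shifts use the amount modulo
  // Size, so an i32 shift by (y + 32) shifts by exactly y. The code below
  // relies on this to drop an ADD/SUB of a multiple of Size from the shift
  // amount.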
  SDValue OrigShiftAmt = N->getOperand(1);
  SDValue ShiftAmt = OrigShiftAmt;
  SDLoc DL(N);

  // Skip over a truncate of the shift amount.
  if (ShiftAmt->getOpcode() == ISD::TRUNCATE)
    ShiftAmt = ShiftAmt->getOperand(0);

  // This function is called after X86DAGToDAGISel::matchBitExtract(),
  // so we are not afraid that we might mess up BZHI/BEXTR pattern.

  SDValue NewShiftAmt;
  if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
    SDValue Add0 = ShiftAmt->getOperand(0);
    SDValue Add1 = ShiftAmt->getOperand(1);
    // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
    // to avoid the ADD/SUB.
    if (isa<ConstantSDNode>(Add1) &&
        cast<ConstantSDNode>(Add1)->getZExtValue() % Size == 0) {
      NewShiftAmt = Add0;
    // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
    // to generate a NEG instead of a SUB of a constant.
    } else if (ShiftAmt->getOpcode() == ISD::SUB &&
               isa<ConstantSDNode>(Add0) &&
               cast<ConstantSDNode>(Add0)->getZExtValue() != 0 &&
               cast<ConstantSDNode>(Add0)->getZExtValue() % Size == 0) {
      // Insert a negate op.
      // TODO: This isn't guaranteed to replace the sub if there is a logic cone
      // that uses it that's not a shift.
      EVT SubVT = ShiftAmt.getValueType();
      SDValue Zero = CurDAG->getConstant(0, DL, SubVT);
      SDValue Neg = CurDAG->getNode(ISD::SUB, DL, SubVT, Zero, Add1);
      NewShiftAmt = Neg;

      // Insert these operands into a valid topological order so they can
      // get selected independently.
      insertDAGNode(*CurDAG, OrigShiftAmt, Zero);
      insertDAGNode(*CurDAG, OrigShiftAmt, Neg);
    } else
      return false;
  } else
    return false;

  if (NewShiftAmt.getValueType() != MVT::i8) {
    // Need to truncate the shift amount.
    NewShiftAmt = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NewShiftAmt);
    // Add to a correct topological ordering.
    insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
  }

  // Insert a new mask to keep the shift amount legal. This should be removed
  // by isel patterns.
  NewShiftAmt = CurDAG->getNode(ISD::AND, DL, MVT::i8, NewShiftAmt,
                                CurDAG->getConstant(Size - 1, DL, MVT::i8));
  // Place in a correct topological ordering.
  insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);

  SDNode *UpdatedNode = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
                                                   NewShiftAmt);
  if (UpdatedNode != N) {
    // If we found an existing node, we should replace ourselves with that node
    // and wait for it to be selected after its other users.
    ReplaceNode(N, UpdatedNode);
    return true;
  }

  // If the original shift amount is now dead, delete it so that we don't run
  // it through isel.
  if (OrigShiftAmt.getNode()->use_empty())
    CurDAG->RemoveDeadNode(OrigShiftAmt.getNode());

  // Now that we've optimized the shift amount, defer to normal isel to get
  // load folding and legacy vs BMI2 selection without repeating it here.
  SelectCode(N);
  return true;
}
bool X86DAGToDAGISel::tryShrinkShlLogicImm(SDNode *N) {
  MVT NVT = N->getSimpleValueType(0);
  unsigned Opcode = N->getOpcode();
  SDLoc dl(N);

  // For operations of the form (x << C1) op C2, check if we can use a smaller
  // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
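  // Worked example (illustrative): (or (shl x, 8), 0x1100) becomes
  // (shl (or x, 0x11), 8); the new constant 0x11 fits a sign-extended imm8
  // where the original 0x1100 needed a full imm32 encoding.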
  SDValue Shift = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
  if (!Cst)
    return false;

  int64_t Val = Cst->getSExtValue();

  // If we have an any_extend feeding the AND, look through it to see if there
  // is a shift behind it. But only if the AND doesn't use the extended bits.
  // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
  bool FoundAnyExtend = false;
  if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
      Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
      isUInt<32>(Val)) {
    FoundAnyExtend = true;
    Shift = Shift.getOperand(0);
  }

  if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
    return false;

  // i8 is unshrinkable, i16 should be promoted to i32.
  if (NVT != MVT::i32 && NVT != MVT::i64)
    return false;

  ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
  if (!ShlCst)
    return false;

  uint64_t ShAmt = ShlCst->getZExtValue();

  // Make sure that we don't change the operation by removing bits.
  // This only matters for OR and XOR, AND is unaffected.
  uint64_t RemovedBitsMask = (1ULL << ShAmt) - 1;
  if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
    return false;

  // Check the minimum bitwidth for the new constant.
  // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
  auto CanShrinkImmediate = [&](int64_t &ShiftedVal) {
    if (Opcode == ISD::AND) {
      // AND32ri is the same as AND64ri32 with zext imm.
      // Try this before sign extended immediates below.
      ShiftedVal = (uint64_t)Val >> ShAmt;
      if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
        return true;
      // Also swap order when the AND can become MOVZX.
      if (ShiftedVal == UINT8_MAX || ShiftedVal == UINT16_MAX)
        return true;
    }
    ShiftedVal = Val >> ShAmt;
    if ((!isInt<8>(Val) && isInt<8>(ShiftedVal)) ||
        (!isInt<32>(Val) && isInt<32>(ShiftedVal)))
      return true;
    if (Opcode != ISD::AND) {
      // MOV32ri+OR64r/XOR64r is cheaper than MOV64ri64+OR64rr/XOR64rr
      ShiftedVal = (uint64_t)Val >> ShAmt;
      if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
        return true;
    }
    return false;
  };

  int64_t ShiftedVal;
  if (!CanShrinkImmediate(ShiftedVal))
    return false;

  // Ok, we can reorder to get a smaller immediate.

  // But, it's possible the original immediate allowed an AND to become MOVZX.
  // Doing this check here to keep the MaskedValueIsZero call as late as
  // possible.
  if (Opcode == ISD::AND) {
    // Find the smallest zext this could possibly be.
    unsigned ZExtWidth = Cst->getAPIntValue().getActiveBits();
    ZExtWidth = PowerOf2Ceil(std::max(ZExtWidth, 8U));

    // Figure out which bits need to be zero to achieve that mask.
    APInt NeededMask = APInt::getLowBitsSet(NVT.getSizeInBits(),
                                            ZExtWidth);
    NeededMask &= ~Cst->getAPIntValue();

    if (CurDAG->MaskedValueIsZero(N->getOperand(0), NeededMask))
      return false;
  }

  SDValue X = Shift.getOperand(0);
  if (FoundAnyExtend) {
    SDValue NewX = CurDAG->getNode(ISD::ANY_EXTEND, dl, NVT, X);
    insertDAGNode(*CurDAG, SDValue(N, 0), NewX);
    X = NewX;
  }

  SDValue NewCst = CurDAG->getConstant(ShiftedVal, dl, NVT);
  insertDAGNode(*CurDAG, SDValue(N, 0), NewCst);
  SDValue NewBinOp = CurDAG->getNode(Opcode, dl, NVT, X, NewCst);
  insertDAGNode(*CurDAG, SDValue(N, 0), NewBinOp);
  SDValue NewSHL = CurDAG->getNode(ISD::SHL, dl, NVT, NewBinOp,
                                   Shift.getOperand(1));
  ReplaceNode(N, NewSHL.getNode());
  SelectCode(NewSHL.getNode());
  return true;
}
/// Convert vector increment or decrement to sub/add with an all-ones constant:
/// add X, <1, 1...> --> sub X, <-1, -1...>
/// sub X, <1, 1...> --> add X, <-1, -1...>
/// The all-ones vector constant can be materialized using a pcmpeq instruction
/// that is commonly recognized as an idiom (has no register dependency), so
/// that's better/smaller than loading a splat 1 constant.
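/// Illustrative example: (add v4i32 X, <1,1,1,1>) becomes
/// (sub X, <-1,-1,-1,-1>), where the all-ones operand lowers to a single
/// "pcmpeqd %xmm, %xmm" rather than a constant-pool load of the splat 1.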
bool X86DAGToDAGISel::combineIncDecVector(SDNode *Node) {
  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB) &&
         "Unexpected opcode for increment/decrement transform");

  EVT VT = Node->getValueType(0);
  assert(VT.isVector() && "Should only be called for vectors.");

  SDValue X = Node->getOperand(0);
  SDValue OneVec = Node->getOperand(1);

  APInt SplatVal;
  if (!X86::isConstantSplat(OneVec, SplatVal) || !SplatVal.isOneValue())
    return false;

  SDLoc DL(Node);
  SDValue OneConstant, AllOnesVec;

  APInt Ones = APInt::getAllOnesValue(32);
  assert(VT.getSizeInBits() % 32 == 0 &&
         "Expected bit count to be a multiple of 32");
  OneConstant = CurDAG->getConstant(Ones, DL, MVT::i32);
  insertDAGNode(*CurDAG, X, OneConstant);

  unsigned NumElts = VT.getSizeInBits() / 32;
  assert(NumElts > 0 && "Expected to get non-empty vector.");
  AllOnesVec = CurDAG->getSplatBuildVector(MVT::getVectorVT(MVT::i32, NumElts),
                                           DL, OneConstant);
  insertDAGNode(*CurDAG, X, AllOnesVec);

  AllOnesVec = CurDAG->getBitcast(VT, AllOnesVec);
  insertDAGNode(*CurDAG, X, AllOnesVec);

  unsigned NewOpcode = Node->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
  SDValue NewNode = CurDAG->getNode(NewOpcode, DL, VT, X, AllOnesVec);

  ReplaceNode(Node, NewNode.getNode());
  SelectCode(NewNode.getNode());
  return true;
}
/// If the high bits of an 'and' operand are known zero, try setting the
/// high bits of an 'and' constant operand to produce a smaller encoding by
/// creating a small, sign-extended negative immediate rather than a large
/// positive one. This reverses a transform in SimplifyDemandedBits that
/// shrinks mask constants by clearing bits. There is also a possibility that
/// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
/// case, just replace the 'and'. Return 'true' if the node is replaced.
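/// Worked example (illustrative): for "and $0x0000FFF0, %eax" where the top
/// 16 bits of the input are known zero, the mask can be widened to
/// 0xFFFFFFF0 (-16), which encodes as a one-byte sign-extended immediate.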
bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
  // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
  // have immediate operands.
  MVT VT = And->getSimpleValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
  if (!And1C)
    return false;

  // Bail out if the mask constant is already negative. It can't shrink more.
  // If the upper 32 bits of a 64 bit mask are all zeros, we have special isel
  // patterns to use a 32-bit and instead of a 64-bit and by relying on the
  // implicit zeroing of 32 bit ops. So we should check if the lower 32 bits
  // are negative too.
  APInt MaskVal = And1C->getAPIntValue();
  unsigned MaskLZ = MaskVal.countLeadingZeros();
  if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
    return false;

  // Don't extend into the upper 32 bits of a 64 bit mask.
  if (VT == MVT::i64 && MaskLZ >= 32) {
    MaskLZ -= 32;
    MaskVal = MaskVal.trunc(32);
  }

  SDValue And0 = And->getOperand(0);
  APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ);
  APInt NegMaskVal = MaskVal | HighZeros;

  // If a negative constant would not allow a smaller encoding, there's no need
  // to continue. Only change the constant when we know it's a win.
  unsigned MinWidth = NegMaskVal.getMinSignedBits();
  if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getMinSignedBits() <= 32))
    return false;

  // Extend masks if we truncated above.
  if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) {
    NegMaskVal = NegMaskVal.zext(64);
    HighZeros = HighZeros.zext(64);
  }

  // The variable operand must be all zeros in the top bits to allow using the
  // new, negative constant as the mask.
  if (!CurDAG->MaskedValueIsZero(And0, HighZeros))
    return false;

  // Check if the mask is -1. In that case, this is an unnecessary instruction
  // that escaped earlier analysis.
  if (NegMaskVal.isAllOnesValue()) {
    ReplaceNode(And, And0.getNode());
    return true;
  }

  // A negative mask allows a smaller encoding. Create a new 'and' node.
  SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT);
  SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask);
  ReplaceNode(And, NewAnd.getNode());
  SelectCode(NewAnd.getNode());
  return true;
}
static unsigned getVPTESTMOpc(MVT TestVT, bool IsTestN, bool FoldedLoad,
                              bool FoldedBCast, bool Masked) {
  if (Masked) {
    if (FoldedLoad) {
      switch (TestVT.SimpleTy) {
      default: llvm_unreachable("Unexpected VT!");
      case MVT::v16i8:
        return IsTestN ? X86::VPTESTNMBZ128rmk : X86::VPTESTMBZ128rmk;
      case MVT::v8i16:
        return IsTestN ? X86::VPTESTNMWZ128rmk : X86::VPTESTMWZ128rmk;
      case MVT::v4i32:
        return IsTestN ? X86::VPTESTNMDZ128rmk : X86::VPTESTMDZ128rmk;
      case MVT::v2i64:
        return IsTestN ? X86::VPTESTNMQZ128rmk : X86::VPTESTMQZ128rmk;
      case MVT::v32i8:
        return IsTestN ? X86::VPTESTNMBZ256rmk : X86::VPTESTMBZ256rmk;
      case MVT::v16i16:
        return IsTestN ? X86::VPTESTNMWZ256rmk : X86::VPTESTMWZ256rmk;
      case MVT::v8i32:
        return IsTestN ? X86::VPTESTNMDZ256rmk : X86::VPTESTMDZ256rmk;
      case MVT::v4i64:
        return IsTestN ? X86::VPTESTNMQZ256rmk : X86::VPTESTMQZ256rmk;
      case MVT::v64i8:
        return IsTestN ? X86::VPTESTNMBZrmk : X86::VPTESTMBZrmk;
      case MVT::v32i16:
        return IsTestN ? X86::VPTESTNMWZrmk : X86::VPTESTMWZrmk;
      case MVT::v16i32:
        return IsTestN ? X86::VPTESTNMDZrmk : X86::VPTESTMDZrmk;
      case MVT::v8i64:
        return IsTestN ? X86::VPTESTNMQZrmk : X86::VPTESTMQZrmk;
      }
    }

    if (FoldedBCast) {
      switch (TestVT.SimpleTy) {
      default: llvm_unreachable("Unexpected VT!");
      case MVT::v4i32:
        return IsTestN ? X86::VPTESTNMDZ128rmbk : X86::VPTESTMDZ128rmbk;
      case MVT::v2i64:
        return IsTestN ? X86::VPTESTNMQZ128rmbk : X86::VPTESTMQZ128rmbk;
      case MVT::v8i32:
        return IsTestN ? X86::VPTESTNMDZ256rmbk : X86::VPTESTMDZ256rmbk;
      case MVT::v4i64:
        return IsTestN ? X86::VPTESTNMQZ256rmbk : X86::VPTESTMQZ256rmbk;
      case MVT::v16i32:
        return IsTestN ? X86::VPTESTNMDZrmbk : X86::VPTESTMDZrmbk;
      case MVT::v8i64:
        return IsTestN ? X86::VPTESTNMQZrmbk : X86::VPTESTMQZrmbk;
      }
    }

    switch (TestVT.SimpleTy) {
    default: llvm_unreachable("Unexpected VT!");
    case MVT::v16i8:
      return IsTestN ? X86::VPTESTNMBZ128rrk : X86::VPTESTMBZ128rrk;
    case MVT::v8i16:
      return IsTestN ? X86::VPTESTNMWZ128rrk : X86::VPTESTMWZ128rrk;
    case MVT::v4i32:
      return IsTestN ? X86::VPTESTNMDZ128rrk : X86::VPTESTMDZ128rrk;
    case MVT::v2i64:
      return IsTestN ? X86::VPTESTNMQZ128rrk : X86::VPTESTMQZ128rrk;
    case MVT::v32i8:
      return IsTestN ? X86::VPTESTNMBZ256rrk : X86::VPTESTMBZ256rrk;
    case MVT::v16i16:
      return IsTestN ? X86::VPTESTNMWZ256rrk : X86::VPTESTMWZ256rrk;
    case MVT::v8i32:
      return IsTestN ? X86::VPTESTNMDZ256rrk : X86::VPTESTMDZ256rrk;
    case MVT::v4i64:
      return IsTestN ? X86::VPTESTNMQZ256rrk : X86::VPTESTMQZ256rrk;
    case MVT::v64i8:
      return IsTestN ? X86::VPTESTNMBZrrk : X86::VPTESTMBZrrk;
    case MVT::v32i16:
      return IsTestN ? X86::VPTESTNMWZrrk : X86::VPTESTMWZrrk;
    case MVT::v16i32:
      return IsTestN ? X86::VPTESTNMDZrrk : X86::VPTESTMDZrrk;
    case MVT::v8i64:
      return IsTestN ? X86::VPTESTNMQZrrk : X86::VPTESTMQZrrk;
    }
  }

  if (FoldedLoad) {
    switch (TestVT.SimpleTy) {
    default: llvm_unreachable("Unexpected VT!");
    case MVT::v16i8:
      return IsTestN ? X86::VPTESTNMBZ128rm : X86::VPTESTMBZ128rm;
    case MVT::v8i16:
      return IsTestN ? X86::VPTESTNMWZ128rm : X86::VPTESTMWZ128rm;
    case MVT::v4i32:
      return IsTestN ? X86::VPTESTNMDZ128rm : X86::VPTESTMDZ128rm;
    case MVT::v2i64:
      return IsTestN ? X86::VPTESTNMQZ128rm : X86::VPTESTMQZ128rm;
    case MVT::v32i8:
      return IsTestN ? X86::VPTESTNMBZ256rm : X86::VPTESTMBZ256rm;
    case MVT::v16i16:
      return IsTestN ? X86::VPTESTNMWZ256rm : X86::VPTESTMWZ256rm;
    case MVT::v8i32:
      return IsTestN ? X86::VPTESTNMDZ256rm : X86::VPTESTMDZ256rm;
    case MVT::v4i64:
      return IsTestN ? X86::VPTESTNMQZ256rm : X86::VPTESTMQZ256rm;
    case MVT::v64i8:
      return IsTestN ? X86::VPTESTNMBZrm : X86::VPTESTMBZrm;
    case MVT::v32i16:
      return IsTestN ? X86::VPTESTNMWZrm : X86::VPTESTMWZrm;
    case MVT::v16i32:
      return IsTestN ? X86::VPTESTNMDZrm : X86::VPTESTMDZrm;
    case MVT::v8i64:
      return IsTestN ? X86::VPTESTNMQZrm : X86::VPTESTMQZrm;
    }
  }

  if (FoldedBCast) {
    switch (TestVT.SimpleTy) {
    default: llvm_unreachable("Unexpected VT!");
    case MVT::v4i32:
      return IsTestN ? X86::VPTESTNMDZ128rmb : X86::VPTESTMDZ128rmb;
    case MVT::v2i64:
      return IsTestN ? X86::VPTESTNMQZ128rmb : X86::VPTESTMQZ128rmb;
    case MVT::v8i32:
      return IsTestN ? X86::VPTESTNMDZ256rmb : X86::VPTESTMDZ256rmb;
    case MVT::v4i64:
      return IsTestN ? X86::VPTESTNMQZ256rmb : X86::VPTESTMQZ256rmb;
    case MVT::v16i32:
      return IsTestN ? X86::VPTESTNMDZrmb : X86::VPTESTMDZrmb;
    case MVT::v8i64:
      return IsTestN ? X86::VPTESTNMQZrmb : X86::VPTESTMQZrmb;
    }
  }

  switch (TestVT.SimpleTy) {
  default: llvm_unreachable("Unexpected VT!");
  case MVT::v16i8:
    return IsTestN ? X86::VPTESTNMBZ128rr : X86::VPTESTMBZ128rr;
  case MVT::v8i16:
    return IsTestN ? X86::VPTESTNMWZ128rr : X86::VPTESTMWZ128rr;
  case MVT::v4i32:
    return IsTestN ? X86::VPTESTNMDZ128rr : X86::VPTESTMDZ128rr;
  case MVT::v2i64:
    return IsTestN ? X86::VPTESTNMQZ128rr : X86::VPTESTMQZ128rr;
  case MVT::v32i8:
    return IsTestN ? X86::VPTESTNMBZ256rr : X86::VPTESTMBZ256rr;
  case MVT::v16i16:
    return IsTestN ? X86::VPTESTNMWZ256rr : X86::VPTESTMWZ256rr;
  case MVT::v8i32:
    return IsTestN ? X86::VPTESTNMDZ256rr : X86::VPTESTMDZ256rr;
  case MVT::v4i64:
    return IsTestN ? X86::VPTESTNMQZ256rr : X86::VPTESTMQZ256rr;
  case MVT::v64i8:
    return IsTestN ? X86::VPTESTNMBZrr : X86::VPTESTMBZrr;
  case MVT::v32i16:
    return IsTestN ? X86::VPTESTNMWZrr : X86::VPTESTMWZrr;
  case MVT::v16i32:
    return IsTestN ? X86::VPTESTNMDZrr : X86::VPTESTMDZrr;
  case MVT::v8i64:
    return IsTestN ? X86::VPTESTNMQZrr : X86::VPTESTMQZrr;
  }
}
// Try to create VPTESTM instruction. If InMask is not null, it will be used
// to form a masked operation.
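// Illustrative example: (and (setcc (v8i32 X), zerovec, setne), M) can become
// a single masked "vptestmd X, X {M}" producing a v8i1 result, instead of a
// compare followed by a separate mask AND.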
bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
                                 SDValue InMask) {
  assert(Subtarget->hasAVX512() && "Expected AVX512!");
  assert(Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 &&
         "Unexpected VT!");

  // Look for equal and not equal compares.
  ISD::CondCode CC = cast<CondCodeSDNode>(Setcc.getOperand(2))->get();
  if (CC != ISD::SETEQ && CC != ISD::SETNE)
    return false;

  SDValue SetccOp0 = Setcc.getOperand(0);
  SDValue SetccOp1 = Setcc.getOperand(1);

  // Canonicalize the all zero vector to the RHS.
  if (ISD::isBuildVectorAllZeros(SetccOp0.getNode()))
    std::swap(SetccOp0, SetccOp1);

  // See if we're comparing against zero.
  if (!ISD::isBuildVectorAllZeros(SetccOp1.getNode()))
    return false;

  SDValue N0 = SetccOp0;

  MVT CmpVT = N0.getSimpleValueType();
  MVT CmpSVT = CmpVT.getVectorElementType();

  // Start with both operands the same. We'll try to refine this.
  SDValue Src0 = N0;
  SDValue Src1 = N0;

  {
    // Look through single use bitcasts.
    SDValue N0Temp = N0;
    if (N0Temp.getOpcode() == ISD::BITCAST && N0Temp.hasOneUse())
      N0Temp = N0.getOperand(0);

    // Look for single use AND.
    if (N0Temp.getOpcode() == ISD::AND && N0Temp.hasOneUse()) {
      Src0 = N0Temp.getOperand(0);
      Src1 = N0Temp.getOperand(1);
    }
  }

  // Without VLX we need to widen the load.
  bool Widen = !Subtarget->hasVLX() && !CmpVT.is512BitVector();

  // We can only fold loads if the sources are unique.
  bool CanFoldLoads = Src0 != Src1;

  // Try to fold loads unless we need to widen.
  bool FoldedLoad = false;
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Load;
  if (!Widen && CanFoldLoads) {
    Load = Src1;
    FoldedLoad = tryFoldLoad(Root, N0.getNode(), Load, Tmp0, Tmp1, Tmp2, Tmp3,
                             Tmp4);
    if (!FoldedLoad) {
      // And is commutative.
      Load = Src0;
      FoldedLoad = tryFoldLoad(Root, N0.getNode(), Load, Tmp0, Tmp1, Tmp2,
                               Tmp3, Tmp4);
      if (FoldedLoad)
        std::swap(Src0, Src1);
    }
  }

  auto findBroadcastedOp = [](SDValue Src, MVT CmpSVT, SDNode *&Parent) {
    // Look through single use bitcasts.
    if (Src.getOpcode() == ISD::BITCAST && Src.hasOneUse())
      Src = Src.getOperand(0);

    if (Src.getOpcode() == X86ISD::VBROADCAST && Src.hasOneUse()) {
      Parent = Src.getNode();
      Src = Src.getOperand(0);
      if (Src.getSimpleValueType() == CmpSVT)
        return Src;
    }

    return SDValue();
  };

  // If we didn't fold a load, try to match broadcast. No widening limitation
  // for this. But only 32 and 64 bit types are supported.
  bool FoldedBCast = false;
  if (!FoldedLoad && CanFoldLoads &&
      (CmpSVT == MVT::i32 || CmpSVT == MVT::i64)) {
    SDNode *ParentNode = nullptr;
    if ((Load = findBroadcastedOp(Src1, CmpSVT, ParentNode))) {
      FoldedBCast = tryFoldLoad(Root, ParentNode, Load, Tmp0,
                                Tmp1, Tmp2, Tmp3, Tmp4);
    }

    // Try the other operand.
    if (!FoldedBCast) {
      if ((Load = findBroadcastedOp(Src0, CmpSVT, ParentNode))) {
        FoldedBCast = tryFoldLoad(Root, ParentNode, Load, Tmp0,
                                  Tmp1, Tmp2, Tmp3, Tmp4);
        if (FoldedBCast)
          std::swap(Src0, Src1);
      }
    }
  }

  auto getMaskRC = [](MVT MaskVT) {
    switch (MaskVT.SimpleTy) {
    default: llvm_unreachable("Unexpected VT!");
    case MVT::v2i1:  return X86::VK2RegClassID;
    case MVT::v4i1:  return X86::VK4RegClassID;
    case MVT::v8i1:  return X86::VK8RegClassID;
    case MVT::v16i1: return X86::VK16RegClassID;
    case MVT::v32i1: return X86::VK32RegClassID;
    case MVT::v64i1: return X86::VK64RegClassID;
    }
  };

  bool IsMasked = InMask.getNode() != nullptr;

  SDLoc dl(Root);

  MVT ResVT = Setcc.getSimpleValueType();
  MVT MaskVT = ResVT;
  if (Widen) {
    // Widen the inputs using insert_subreg or copy_to_regclass.
    unsigned Scale = CmpVT.is128BitVector() ? 4 : 2;
    unsigned SubReg = CmpVT.is128BitVector() ? X86::sub_xmm : X86::sub_ymm;
    unsigned NumElts = CmpVT.getVectorNumElements() * Scale;
    CmpVT = MVT::getVectorVT(CmpSVT, NumElts);
    MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
    SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, dl,
                                                     CmpVT), 0);
    Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0);

    assert(!FoldedLoad && "Shouldn't have folded the load");
    if (!FoldedBCast)
      Src1 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src1);

    if (IsMasked) {
      // Widen the mask.
      unsigned RegClass = getMaskRC(MaskVT);
      SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
      InMask = SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                              dl, MaskVT, InMask, RC), 0);
    }
  }

  bool IsTestN = CC == ISD::SETEQ;
  unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast,
                               IsMasked);

  MachineSDNode *CNode;
  if (FoldedLoad || FoldedBCast) {
    SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other);

    if (IsMasked) {
      SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
                        Load.getOperand(0) };
      CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
    } else {
      SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
                        Load.getOperand(0) };
      CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
    }

    // Update the chain.
    ReplaceUses(Load.getValue(1), SDValue(CNode, 1));
    // Record the mem-refs
    CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(Load)->getMemOperand()});
  } else {
    if (IsMasked)
      CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1);
    else
      CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, Src0, Src1);
  }

  // If we widened, we need to shrink the mask VT.
  if (Widen) {
    unsigned RegClass = getMaskRC(ResVT);
    SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
    CNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                   dl, ResVT, SDValue(CNode, 0), RC);
  }

  ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0));
  CurDAG->RemoveDeadNode(Root);
  return true;
}
void X86DAGToDAGISel::Select(SDNode *Node) {
  MVT NVT = Node->getSimpleValueType(0);
  unsigned Opcode = Node->getOpcode();
  SDLoc dl(Node);

  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(dbgs() << "== ";  Node->dump(CurDAG); dbgs() << '\n');
    Node->setNodeId(-1);
    return;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = Node->getConstantOperandVal(1);
    switch (IntNo) {
    default: break;
    case Intrinsic::x86_sse3_monitor:
    case Intrinsic::x86_monitorx:
    case Intrinsic::x86_clzero: {
      bool Use64BitPtr = Node->getOperand(2).getValueType() == MVT::i64;

      unsigned Opc = 0;
      switch (IntNo) {
      default: llvm_unreachable("Unexpected intrinsic!");
      case Intrinsic::x86_sse3_monitor:
        if (!Subtarget->hasSSE3())
          break;
        Opc = Use64BitPtr ? X86::MONITOR64rrr : X86::MONITOR32rrr;
        break;
      case Intrinsic::x86_monitorx:
        if (!Subtarget->hasMWAITX())
          break;
        Opc = Use64BitPtr ? X86::MONITORX64rrr : X86::MONITORX32rrr;
        break;
      case Intrinsic::x86_clzero:
        if (!Subtarget->hasCLZERO())
          break;
        Opc = Use64BitPtr ? X86::CLZERO64r : X86::CLZERO32r;
        break;
      }

      if (Opc) {
        unsigned PtrReg = Use64BitPtr ? X86::RAX : X86::EAX;
        SDValue Chain = CurDAG->getCopyToReg(Node->getOperand(0), dl, PtrReg,
                                             Node->getOperand(2), SDValue());
        SDValue InFlag = Chain.getValue(1);

        if (IntNo == Intrinsic::x86_sse3_monitor ||
            IntNo == Intrinsic::x86_monitorx) {
          // Copy the other two operands to ECX and EDX.
          Chain = CurDAG->getCopyToReg(Chain, dl, X86::ECX, Node->getOperand(3),
                                       InFlag);
          InFlag = Chain.getValue(1);
          Chain = CurDAG->getCopyToReg(Chain, dl, X86::EDX, Node->getOperand(4),
                                       InFlag);
          InFlag = Chain.getValue(1);
        }

        MachineSDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
                                                      { Chain, InFlag });
        ReplaceNode(Node, CNode);
        return;
      }

      break;
    }
    }

    break;
  }
  case ISD::BRIND: {
    if (Subtarget->isTargetNaCl())
      // NaCl has its own pass where jmp %r32 are converted to jmp %r64. We
      // leave the instruction alone.
      break;
    if (Subtarget->isTarget64BitILP32()) {
      // Converts a 32-bit register to a 64-bit, zero-extended version of
      // it. This is needed because x86-64 can do many things, but jmp %r32
      // ain't one of them.
      const SDValue &Target = Node->getOperand(1);
      assert(Target.getSimpleValueType() == llvm::MVT::i32);
      SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, EVT(MVT::i64));
      SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other,
                                      Node->getOperand(0), ZextTarget);
      ReplaceNode(Node, Brind.getNode());
      SelectCode(ZextTarget.getNode());
      SelectCode(Brind.getNode());
      return;
    }
    break;
  }
  case X86ISD::GlobalBaseReg:
    ReplaceNode(Node, getGlobalBaseReg());
    return;

  case ISD::BITCAST:
    // Just drop all 128/256/512-bit bitcasts.
    if (NVT.is512BitVector() || NVT.is256BitVector() || NVT.is128BitVector() ||
        NVT == MVT::f128) {
      ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
      CurDAG->RemoveDeadNode(Node);
      return;
    }
    break;

  case ISD::VSELECT: {
    // Replace VSELECT with non-mask conditions with BLENDV.
    if (Node->getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      break;

    assert(Subtarget->hasSSE41() && "Expected SSE4.1 support!");
    SDValue Blendv = CurDAG->getNode(
        X86ISD::BLENDV, SDLoc(Node), Node->getValueType(0), Node->getOperand(0),
        Node->getOperand(1), Node->getOperand(2));
    ReplaceNode(Node, Blendv.getNode());
    SelectCode(Blendv.getNode());
    // We already called ReplaceUses.
    return;
  }

  case ISD::SRL:
    if (matchBitExtract(Node))
      return;
    LLVM_FALLTHROUGH;
  case ISD::SRA:
  case ISD::SHL:
    if (tryShiftAmountMod(Node))
      return;
    break;

  case ISD::AND:
    if (NVT.isVector() && NVT.getVectorElementType() == MVT::i1) {
      // Try to form a masked VPTESTM. Operands can be in either order.
      SDValue N0 = Node->getOperand(0);
      SDValue N1 = Node->getOperand(1);
      if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() &&
          tryVPTESTM(Node, N0, N1))
        return;
      if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
          tryVPTESTM(Node, N1, N0))
        return;
    }

    if (MachineSDNode *NewNode = matchBEXTRFromAndImm(Node)) {
      ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
      CurDAG->RemoveDeadNode(Node);
      return;
    }
    if (matchBitExtract(Node))
      return;
    if (AndImmShrink && shrinkAndImmediate(Node))
      return;

    LLVM_FALLTHROUGH;
  case ISD::OR:
  case ISD::XOR:
    if (tryShrinkShlLogicImm(Node))
      return;

    LLVM_FALLTHROUGH;
  case ISD::ADD:
  case ISD::SUB: {
    if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && NVT.isVector() &&
        combineIncDecVector(Node))
      return;

    // Try to avoid folding immediates with multiple uses for optsize.
    // This code tries to select to register form directly to avoid going
    // through the isel table which might fold the immediate. We can't change
    // the patterns on the add/sub/and/or/xor with immediate patterns in the
    // tablegen files to check immediate use count without making the patterns
    // unavailable to the fast-isel table.
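    // For example (illustrative): at minsize, several "addl $1234567, %reg"
    // instructions would each carry their own imm32; selecting the register
    // form allows one "movl $1234567, %tmp" to be shared by all of them.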
    if (!OptForSize)
      break;

    // Only handle i8/i16/i32/i64.
    if (NVT != MVT::i8 && NVT != MVT::i16 && NVT != MVT::i32 && NVT != MVT::i64)
      break;

    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
    if (!Cst)
      break;

    int64_t Val = Cst->getSExtValue();

    // Make sure it's an immediate that is considered foldable.
    // FIXME: Handle unsigned 32 bit immediates for 64-bit AND.
    if (!isInt<8>(Val) && !isInt<32>(Val))
      break;

    // If this can match to INC/DEC, let it go.
    if (Opcode == ISD::ADD && (Val == 1 || Val == -1))
      break;

    // Check if we should avoid folding this immediate.
    if (!shouldAvoidImmediateInstFormsForSize(N1.getNode()))
      break;

    // We should not fold the immediate. So we need a register form instead.
    unsigned ROpc, MOpc;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unexpected VT!");
    case MVT::i8:
      switch (Opcode) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::ADD: ROpc = X86::ADD8rr; MOpc = X86::ADD8rm; break;
      case ISD::SUB: ROpc = X86::SUB8rr; MOpc = X86::SUB8rm; break;
      case ISD::AND: ROpc = X86::AND8rr; MOpc = X86::AND8rm; break;
      case ISD::OR:  ROpc = X86::OR8rr;  MOpc = X86::OR8rm;  break;
      case ISD::XOR: ROpc = X86::XOR8rr; MOpc = X86::XOR8rm; break;
      }
      break;
    case MVT::i16:
      switch (Opcode) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::ADD: ROpc = X86::ADD16rr; MOpc = X86::ADD16rm; break;
      case ISD::SUB: ROpc = X86::SUB16rr; MOpc = X86::SUB16rm; break;
      case ISD::AND: ROpc = X86::AND16rr; MOpc = X86::AND16rm; break;
      case ISD::OR:  ROpc = X86::OR16rr;  MOpc = X86::OR16rm;  break;
      case ISD::XOR: ROpc = X86::XOR16rr; MOpc = X86::XOR16rm; break;
      }
      break;
    case MVT::i32:
      switch (Opcode) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::ADD: ROpc = X86::ADD32rr; MOpc = X86::ADD32rm; break;
      case ISD::SUB: ROpc = X86::SUB32rr; MOpc = X86::SUB32rm; break;
      case ISD::AND: ROpc = X86::AND32rr; MOpc = X86::AND32rm; break;
      case ISD::OR:  ROpc = X86::OR32rr;  MOpc = X86::OR32rm;  break;
      case ISD::XOR: ROpc = X86::XOR32rr; MOpc = X86::XOR32rm; break;
      }
      break;
    case MVT::i64:
      switch (Opcode) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::ADD: ROpc = X86::ADD64rr; MOpc = X86::ADD64rm; break;
      case ISD::SUB: ROpc = X86::SUB64rr; MOpc = X86::SUB64rm; break;
      case ISD::AND: ROpc = X86::AND64rr; MOpc = X86::AND64rm; break;
      case ISD::OR:  ROpc = X86::OR64rr;  MOpc = X86::OR64rm;  break;
      case ISD::XOR: ROpc = X86::XOR64rr; MOpc = X86::XOR64rm; break;
      }
      break;
    }

    // Ok this is an AND/OR/XOR/ADD/SUB with constant.

    // If this is not a subtract, we can still try to fold a load.
    if (Opcode != ISD::SUB) {
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
      if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
        MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        // Update the chain.
        ReplaceUses(N0.getValue(1), SDValue(CNode, 2));
        // Record the mem-refs
        CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N0)->getMemOperand()});
        ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
        CurDAG->RemoveDeadNode(Node);
        return;
      }
    }

    CurDAG->SelectNodeTo(Node, ROpc, NVT, MVT::i32, N0, N1);
    return;
  }

  case X86ISD::SMUL:
    // i16/i32/i64 are handled with isel patterns.
    if (NVT != MVT::i8)
      break;
    LLVM_FALLTHROUGH;
  case X86ISD::UMUL: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned LoReg, ROpc, MOpc;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;
      ROpc = Opcode == X86ISD::SMUL ? X86::IMUL8r : X86::MUL8r;
      MOpc = Opcode == X86ISD::SMUL ? X86::IMUL8m : X86::MUL8m;
      break;
    case MVT::i16:
      LoReg = X86::AX;
      ROpc = X86::MUL16r;
      MOpc = X86::MUL16m;
      break;
    case MVT::i32:
      LoReg = X86::EAX;
      ROpc = X86::MUL32r;
      MOpc = X86::MUL32m;
      break;
    case MVT::i64:
      LoReg = X86::RAX;
      ROpc = X86::MUL64r;
      MOpc = X86::MUL64m;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool FoldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!FoldedLoad) {
      FoldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (FoldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    MachineSDNode *CNode;
    if (FoldedLoad) {
      // i16/i32/i64 use an instruction that produces a low and high result
      // even though only the low result is used.
      SDVTList VTs;
      if (NVT == MVT::i8)
        VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
      else
        VTs = CurDAG->getVTList(NVT, NVT, MVT::i32, MVT::Other);

      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);

      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, NVT == MVT::i8 ? 2 : 3));
      // Record the mem-refs
      CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
    } else {
      // i16/i32/i64 use an instruction that produces a low and high result
      // even though only the low result is used.
      SDVTList VTs;
      if (NVT == MVT::i8)
        VTs = CurDAG->getVTList(NVT, MVT::i32);
      else
        VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);

      CNode = CurDAG->getMachineNode(ROpc, dl, VTs, {N1, InFlag});
    }

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, NVT == MVT::i8 ? 1 : 2));
    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned Opc, MOpc;
    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned) {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned SrcReg, LoReg, HiReg;
    switch (Opc) {
    default: llvm_unreachable("Unknown MUL opcode!");
    case X86::IMUL32r:
    case X86::MUL32r:
      SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
      break;
    case X86::IMUL64r:
    case X86::MUL64r:
      SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
                                          N0, SDValue()).getValue(1);

    SDValue Chain;
    MachineSDNode *CNode = nullptr;
    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
      CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
      Chain = SDValue(CNode, 0);
      InFlag = SDValue(CNode, 1);

      // Update the chain.
      ReplaceUses(N1.getValue(1), Chain);
      // Record the mem-refs
      CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
    } else {
      SDValue Ops[] = { N1, InFlag };
      SDVTList VTs = CurDAG->getVTList(MVT::Glue);
      SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
      InFlag = SDValue(CNode, 0);
    }

    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      assert(LoReg && "Register for low half is not defined!");
      SDValue ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg,
                                             NVT, InFlag);
      InFlag = ResLo.getValue(2);
      ReplaceUses(SDValue(Node, 0), ResLo);
      LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      assert(HiReg && "Register for high half is not defined!");
      SDValue ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg,
                                             NVT, InFlag);
      InFlag = ResHi.getValue(2);
      ReplaceUses(SDValue(Node, 1), ResHi);
      LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }

    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned Opc, MOpc;
    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned SExtOpcode;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      SExtOpcode = 0; // Not used.
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
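      // Illustrative note: the 8-bit DIV/IDIV divide all 16 bits of AX by
      // the r/m8 operand, leaving the quotient in AL and the remainder in
      // AH, so the widening move below is what gives AH a defined value.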
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain;
      MachineSDNode *Move;
      if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rm8
                                                    : X86::MOVZX16rm8;
        Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, MVT::Other, Ops);
        Chain = SDValue(Move, 1);
        ReplaceUses(N0.getValue(1), Chain);
        // Record the mem-refs
        CurDAG->setNodeMemRefs(Move, {cast<LoadSDNode>(N0)->getMemOperand()});
      } else {
        unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rr8
                                                    : X86::MOVZX16rr8;
        Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, N0);
        Chain = CurDAG->getEntryNode();
      }
      Chain = CurDAG->getCopyToReg(Chain, dl, X86::AX, SDValue(Move, 0),
                                   SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
        switch (NVT.SimpleTy) {
        case MVT::i16:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
                          CurDAG->getTargetConstant(X86::sub_16bit, dl,
                                                    MVT::i32)),
                      0);
          break;
        case MVT::i32:
          break;
        case MVT::i64:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                          CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
                          CurDAG->getTargetConstant(X86::sub_32bit, dl,
                                                    MVT::i32)),
                      0);
          break;
        default:
          llvm_unreachable("Unexpected division source");
        }

        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      MachineSDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
      // Record the mem-refs
      CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by explicitly copying it to
    // an ABCD_L register.
    //
    // The current assumption of the register allocator is that isel
    // won't generate explicit references to the GR8_ABCD_H registers. If
    // the allocator and/or the backend get enhanced to be more robust in
    // that regard, this can be, and should be, removed.
    if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
      SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
      unsigned AHExtOpcode =
          isSigned ? X86::MOVSX32rr8_NOREX : X86::MOVZX32rr8_NOREX;

      SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
                                             MVT::Glue, AHCopy, InFlag);
      SDValue Result(RNode, 0);
      InFlag = SDValue(RNode, 1);

      Result =
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);

      ReplaceUses(SDValue(Node, 1), Result);
      LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }
    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case X86ISD::CMP: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Optimizations for TEST compares.
    if (!isNullConstant(N1))
      break;

    // Save the original VT of the compare.
    MVT CmpVT = N0.getSimpleValueType();

    // If we are comparing (and (shr X, C), Mask) with 0, emit a BEXTR followed
    // by a test instruction. The test should be removed later by
    // analyzeCompare if we are using only the zero flag.
    // TODO: Should we check the users and use the BEXTR flags directly?
    if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
      if (MachineSDNode *NewNode = matchBEXTRFromAndImm(N0.getNode())) {
        unsigned TestOpc = CmpVT == MVT::i64 ? X86::TEST64rr
                                             : X86::TEST32rr;
        SDValue BEXTR = SDValue(NewNode, 0);
        NewNode = CurDAG->getMachineNode(TestOpc, dl, MVT::i32, BEXTR, BEXTR);
        ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
        CurDAG->RemoveDeadNode(Node);
        return;
      }
    }

    // We can peek through truncates, but we need to be careful below.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
      N0 = N0.getOperand(0);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    // Look past the truncate if CMP is the only use of it.
    if (N0.getOpcode() == ISD::AND &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (!C)
        break;
      uint64_t Mask = C->getZExtValue();

      // Check if we can replace AND+IMM64 with a shift. This is possible for
      // masks like 0xFF000000 or 0x00FFFFFF and if we care only about the
      // zero flag.
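      // Worked example (illustrative): for Mask == 0xFFFFFFFF00000000,
      // ~Mask is a low-bits mask, so "shrq $32" followed by "testq" checks
      // the same zero flag without a movabs of the 64-bit immediate.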
      if (CmpVT == MVT::i64 && !isInt<32>(Mask) &&
          onlyUsesZeroFlag(SDValue(Node, 0))) {
        if (isMask_64(~Mask)) {
          unsigned TrailingZeros = countTrailingZeros(Mask);
          SDValue Imm = CurDAG->getTargetConstant(TrailingZeros, dl, MVT::i64);
          SDValue Shift =
            SDValue(CurDAG->getMachineNode(X86::SHR64ri, dl, MVT::i64, MVT::i32,
                                           N0.getOperand(0), Imm), 0);
          MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl,
                                                       MVT::i32, Shift, Shift);
          ReplaceNode(Node, Test);
          return;
        }
        if (isMask_64(Mask)) {
          unsigned LeadingZeros = countLeadingZeros(Mask);
          SDValue Imm = CurDAG->getTargetConstant(LeadingZeros, dl, MVT::i64);
          SDValue Shift =
            SDValue(CurDAG->getMachineNode(X86::SHL64ri, dl, MVT::i64, MVT::i32,
                                           N0.getOperand(0), Imm), 0);
          MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl,
                                                       MVT::i32, Shift, Shift);
          ReplaceNode(Node, Test);
          return;
        }
      }

      MVT VT;
      int SubRegOp;
      unsigned ROpc, MOpc;

      // For each of these checks we need to be careful if the sign flag is
      // being used. It is only safe to use the sign flag in two conditions,
      // either the sign bit in the shrunken mask is zero or the final test
      // size is equal to the original compare size.

      if (isUInt<8>(Mask) &&
          (!(Mask & 0x80) || CmpVT == MVT::i8 ||
           hasNoSignFlagUses(SDValue(Node, 0)))) {
        // For example, convert "testl %eax, $8" to "testb %al, $8"
        VT = MVT::i8;
        SubRegOp = X86::sub_8bit;
        ROpc = X86::TEST8ri;
        MOpc = X86::TEST8mi;
      } else if (OptForMinSize && isUInt<16>(Mask) &&
                 (!(Mask & 0x8000) || CmpVT == MVT::i16 ||
                  hasNoSignFlagUses(SDValue(Node, 0)))) {
        // For example, "testl %eax, $32776" to "testw %ax, $32776".
        // NOTE: We only want to form TESTW instructions if optimizing for
        // min size. Otherwise we only save one byte and possibly get a length
        // changing prefix penalty in the decoders.
        VT = MVT::i16;
        SubRegOp = X86::sub_16bit;
        ROpc = X86::TEST16ri;
        MOpc = X86::TEST16mi;
      } else if (isUInt<32>(Mask) && N0.getValueType() != MVT::i16 &&
                 ((!(Mask & 0x80000000) &&
                   // Without minsize 16-bit Cmps can get here so we need to
                   // be sure we calculate the correct sign flag if needed.
                   (CmpVT != MVT::i16 || !(Mask & 0x8000))) ||
                  CmpVT == MVT::i32 ||
                  hasNoSignFlagUses(SDValue(Node, 0)))) {
        // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
        // NOTE: We only want to run that transform if N0 is 32 or 64 bits.
        // Otherwise, we find ourselves in a position where we have to do
        // promotion. If previous passes did not promote the and, we assume
        // they had a good reason not to and do not promote here.
        VT = MVT::i32;
        SubRegOp = X86::sub_32bit;
        ROpc = X86::TEST32ri;
        MOpc = X86::TEST32mi;
      } else {
        // No eligible transformation was found.
        break;
      }

      SDValue Imm = CurDAG->getTargetConstant(Mask, dl, VT);
      SDValue Reg = N0.getOperand(0);

      // Emit a testl or testw.
      MachineSDNode *NewNode;
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
      if (tryFoldLoad(Node, N0.getNode(), Reg, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
                          Reg.getOperand(0) };
        NewNode = CurDAG->getMachineNode(MOpc, dl, MVT::i32, MVT::Other, Ops);
        // Update the chain.
        ReplaceUses(Reg.getValue(1), SDValue(NewNode, 1));
        // Record the mem-refs
        CurDAG->setNodeMemRefs(NewNode,
                               {cast<LoadSDNode>(Reg)->getMemOperand()});
      } else {
        // Extract the subregister if necessary.
        if (N0.getValueType() != VT)
          Reg = CurDAG->getTargetExtractSubreg(SubRegOp, dl, VT, Reg);

        NewNode = CurDAG->getMachineNode(ROpc, dl, MVT::i32, Reg, Imm);
      }
      // Replace CMP with TEST.
      ReplaceNode(Node, NewNode);
      return;
    }
    break;
  }
  case X86ISD::PCMPISTR: {
    if (!Subtarget->hasSSE42())
      break;

    bool NeedIndex = !SDValue(Node, 0).use_empty();
    bool NeedMask = !SDValue(Node, 1).use_empty();
    // We can't fold a load if we are going to make two instructions.
    bool MayFoldLoad = !NeedIndex || !NeedMask;

    MachineSDNode *CNode;
    if (NeedMask) {
      unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrr : X86::PCMPISTRMrr;
      unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrm : X86::PCMPISTRMrm;
      CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node);
      ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
    }
    if (NeedIndex || !NeedMask) {
      unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr : X86::PCMPISTRIrr;
      unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrm : X86::PCMPISTRIrm;
      CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node);
      ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    }

    // Connect the flag usage to the last instruction created.
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
    CurDAG->RemoveDeadNode(Node);
    return;
  }
  case X86ISD::PCMPESTR: {
    if (!Subtarget->hasSSE42())
      break;

    // Copy the two implicit register inputs.
    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EAX,
                                          Node->getOperand(1),
                                          SDValue()).getValue(1);
    InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
                                  Node->getOperand(3), InFlag).getValue(1);

    bool NeedIndex = !SDValue(Node, 0).use_empty();
    bool NeedMask = !SDValue(Node, 1).use_empty();
    // We can't fold a load if we are going to make two instructions.
    bool MayFoldLoad = !NeedIndex || !NeedMask;

    MachineSDNode *CNode;
    if (NeedMask) {
      unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrr : X86::PCMPESTRMrr;
      unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrm : X86::PCMPESTRMrm;
      CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node,
                           InFlag);
      ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
    }
    if (NeedIndex || !NeedMask) {
      unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : X86::PCMPESTRIrr;
      unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrm : X86::PCMPESTRIrm;
      CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node, InFlag);
      ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    }
    // Connect the flag usage to the last instruction created.
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case ISD::SETCC: {
    if (NVT.isVector() && tryVPTESTM(Node, SDValue(Node, 0), SDValue()))
      return;

    break;
  }

  case ISD::STORE:
    if (foldLoadStoreIntoMemOperand(Node))
      return;
    break;
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FTRUNC:
  case ISD::FNEARBYINT:
  case ISD::FRINT: {
    // Replace fp rounding with their X86 specific equivalent so we don't
    // need 2 sets of patterns.
    // FIXME: This can only happen when the nodes started as STRICT_* and have
    // been mutated into their non-STRICT equivalents. Eventually this
    // mutation will be removed and we should switch the STRICT_ nodes to a
    // strict version of RNDSCALE in PreProcessISelDAG.
    unsigned Imm;
    switch (Node->getOpcode()) {
    default: llvm_unreachable("Unexpected opcode!");
    case ISD::FCEIL:      Imm = 0xA; break;
    case ISD::FFLOOR:     Imm = 0x9; break;
    case ISD::FTRUNC:     Imm = 0xB; break;
    case ISD::FNEARBYINT: Imm = 0xC; break;
    case ISD::FRINT:      Imm = 0x4; break;
    }
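    // Note (believed-accurate summary of the RNDSCALE/ROUND immediate,
    // stated for illustration): bits 1:0 select the rounding mode
    // (floor = 01, ceil = 10, trunc = 11), bit 2 selects the MXCSR rounding
    // mode instead, and bit 3 suppresses precision exceptions. So FRINT
    // (0x4) uses MXCSR and may raise inexact, while FNEARBYINT (0xC) uses
    // MXCSR and suppresses it.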
    SDValue Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl, Node->getValueType(0),
                                  Node->getOperand(0),
                                  CurDAG->getTargetConstant(Imm, dl, MVT::i8));
    ReplaceNode(Node, Res.getNode());
    SelectCode(Res.getNode());
    return;
  }
  }

  SelectCode(Node);
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
    // FIXME: It seems strange that 'i' is needed here since it's supposed to
    //        be an immediate and not a memory constraint.
    LLVM_FALLTHROUGH;
  case InlineAsm::Constraint_o: // offsetable        ??
  case InlineAsm::Constraint_v: // not offsetable    ??
  case InlineAsm::Constraint_m: // memory
  case InlineAsm::Constraint_X:
    if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// This pass converts a legalized DAG into a X86-specific DAG,
/// ready for instruction scheduling.
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}