[Alignment][NFC] Use Align with TargetLowering::setPrefLoopAlignment
[llvm-complete.git] / lib / Target / X86 / X86ISelDAGToDAG.cpp
blob eaedf14a9d8d32997d9b84d8fe15b0f6c0a226c9
1 //===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines a DAG pattern matching instruction selector for X86,
10 // converting from a legalized dag to an X86 dag.
12 //===----------------------------------------------------------------------===//
14 #include "X86.h"
15 #include "X86MachineFunctionInfo.h"
16 #include "X86RegisterInfo.h"
17 #include "X86Subtarget.h"
18 #include "X86TargetMachine.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/SelectionDAGISel.h"
23 #include "llvm/Config/llvm-config.h"
24 #include "llvm/IR/ConstantRange.h"
25 #include "llvm/IR/Function.h"
26 #include "llvm/IR/Instructions.h"
27 #include "llvm/IR/Intrinsics.h"
28 #include "llvm/IR/Type.h"
29 #include "llvm/Support/Debug.h"
30 #include "llvm/Support/ErrorHandling.h"
31 #include "llvm/Support/KnownBits.h"
32 #include "llvm/Support/MathExtras.h"
33 #include "llvm/Support/raw_ostream.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include <stdint.h>
37 using namespace llvm;
39 #define DEBUG_TYPE "x86-isel"
41 STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
43 static cl::opt<bool> AndImmShrink("x86-and-imm-shrink", cl::init(true),
44 cl::desc("Enable setting constant bits to reduce size of mask immediates"),
45 cl::Hidden);
47 //===----------------------------------------------------------------------===//
48 // Pattern Matcher Implementation
49 //===----------------------------------------------------------------------===//
51 namespace {
52 /// This corresponds to X86AddressMode, but uses SDValues instead of register
53 /// numbers for the leaves of the matched tree.
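/// Taken together the fields describe the general x86 memory operand form
///   Segment : [ Base_Reg or Base_FrameIndex + Scale * IndexReg + Disp + sym ]
/// where the symbolic part, if any, is one of GV/CP/ES/MCSym/JT/BlockAddr.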
54 struct X86ISelAddressMode {
55 enum {
56 RegBase,
57 FrameIndexBase
58 } BaseType;
60 // This is really a union, discriminated by BaseType!
61 SDValue Base_Reg;
62 int Base_FrameIndex;
64 unsigned Scale;
65 SDValue IndexReg;
66 int32_t Disp;
67 SDValue Segment;
68 const GlobalValue *GV;
69 const Constant *CP;
70 const BlockAddress *BlockAddr;
71 const char *ES;
72 MCSymbol *MCSym;
73 int JT;
74 unsigned Align; // CP alignment.
75 unsigned char SymbolFlags; // X86II::MO_*
76 bool NegateIndex = false;
78 X86ISelAddressMode()
79 : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
80 Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
81 MCSym(nullptr), JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {}
83 bool hasSymbolicDisplacement() const {
84 return GV != nullptr || CP != nullptr || ES != nullptr ||
85 MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
88 bool hasBaseOrIndexReg() const {
89 return BaseType == FrameIndexBase ||
90 IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
93 /// Return true if this addressing mode is already RIP-relative.
94 bool isRIPRelative() const {
95 if (BaseType != RegBase) return false;
96 if (RegisterSDNode *RegNode =
97 dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
98 return RegNode->getReg() == X86::RIP;
99 return false;
102 void setBaseReg(SDValue Reg) {
103 BaseType = RegBase;
104 Base_Reg = Reg;
107 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
108 void dump(SelectionDAG *DAG = nullptr) {
109 dbgs() << "X86ISelAddressMode " << this << '\n';
110 dbgs() << "Base_Reg ";
111 if (Base_Reg.getNode())
112 Base_Reg.getNode()->dump(DAG);
113 else
114 dbgs() << "nul\n";
115 if (BaseType == FrameIndexBase)
116 dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
117 dbgs() << " Scale " << Scale << '\n'
118 << "IndexReg ";
119 if (NegateIndex)
120 dbgs() << "negate ";
121 if (IndexReg.getNode())
122 IndexReg.getNode()->dump(DAG);
123 else
124 dbgs() << "nul\n";
125 dbgs() << " Disp " << Disp << '\n'
126 << "GV ";
127 if (GV)
128 GV->dump();
129 else
130 dbgs() << "nul";
131 dbgs() << " CP ";
132 if (CP)
133 CP->dump();
134 else
135 dbgs() << "nul";
136 dbgs() << '\n'
137 << "ES ";
138 if (ES)
139 dbgs() << ES;
140 else
141 dbgs() << "nul";
142 dbgs() << " MCSym ";
143 if (MCSym)
144 dbgs() << MCSym;
145 else
146 dbgs() << "nul";
147 dbgs() << " JT" << JT << " Align" << Align << '\n';
149 #endif
153 namespace {
154 //===--------------------------------------------------------------------===//
155 /// ISel - X86-specific code to select X86 machine instructions for
156 /// SelectionDAG operations.
158 class X86DAGToDAGISel final : public SelectionDAGISel {
159 /// Keep a pointer to the X86Subtarget around so that we can
160 /// make the right decision when generating code for different targets.
161 const X86Subtarget *Subtarget;
163 /// If true, selector should try to optimize for code size instead of
164 /// performance.
165 bool OptForSize;
167 /// If true, selector should try to optimize for minimum code size.
168 bool OptForMinSize;
170 /// Disable direct TLS access through segment registers.
171 bool IndirectTlsSegRefs;
173 public:
174 explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
175 : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr), OptForSize(false),
176 OptForMinSize(false), IndirectTlsSegRefs(false) {}
178 StringRef getPassName() const override {
179 return "X86 DAG->DAG Instruction Selection";
182 bool runOnMachineFunction(MachineFunction &MF) override {
183 // Reset the subtarget each time through.
184 Subtarget = &MF.getSubtarget<X86Subtarget>();
185 IndirectTlsSegRefs = MF.getFunction().hasFnAttribute(
186 "indirect-tls-seg-refs");
188 // OptFor[Min]Size are used in pattern predicates that isel is matching.
189 OptForSize = MF.getFunction().hasOptSize();
190 OptForMinSize = MF.getFunction().hasMinSize();
191 assert((!OptForMinSize || OptForSize) &&
192 "OptForMinSize implies OptForSize");
194 SelectionDAGISel::runOnMachineFunction(MF);
195 return true;
198 void EmitFunctionEntryCode() override;
200 bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
202 void PreprocessISelDAG() override;
203 void PostprocessISelDAG() override;
205 // Include the pieces autogenerated from the target description.
206 #include "X86GenDAGISel.inc"
208 private:
209 void Select(SDNode *N) override;
211 bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
212 bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
213 bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
214 bool matchAddress(SDValue N, X86ISelAddressMode &AM);
215 bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
216 bool matchAdd(SDValue &N, X86ISelAddressMode &AM, unsigned Depth);
217 bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
218 unsigned Depth);
219 bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
220 bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
221 SDValue &Scale, SDValue &Index, SDValue &Disp,
222 SDValue &Segment);
223 bool selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
224 SDValue &Scale, SDValue &Index, SDValue &Disp,
225 SDValue &Segment);
226 bool selectMOV64Imm32(SDValue N, SDValue &Imm);
227 bool selectLEAAddr(SDValue N, SDValue &Base,
228 SDValue &Scale, SDValue &Index, SDValue &Disp,
229 SDValue &Segment);
230 bool selectLEA64_32Addr(SDValue N, SDValue &Base,
231 SDValue &Scale, SDValue &Index, SDValue &Disp,
232 SDValue &Segment);
233 bool selectTLSADDRAddr(SDValue N, SDValue &Base,
234 SDValue &Scale, SDValue &Index, SDValue &Disp,
235 SDValue &Segment);
236 bool selectScalarSSELoad(SDNode *Root, SDNode *Parent, SDValue N,
237 SDValue &Base, SDValue &Scale,
238 SDValue &Index, SDValue &Disp,
239 SDValue &Segment,
240 SDValue &NodeWithChain);
241 bool selectRelocImm(SDValue N, SDValue &Op);
243 bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
244 SDValue &Base, SDValue &Scale,
245 SDValue &Index, SDValue &Disp,
246 SDValue &Segment);
248 // Convenience method where P is also root.
249 bool tryFoldLoad(SDNode *P, SDValue N,
250 SDValue &Base, SDValue &Scale,
251 SDValue &Index, SDValue &Disp,
252 SDValue &Segment) {
253 return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
256 /// Implement addressing mode selection for inline asm expressions.
257 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
258 unsigned ConstraintID,
259 std::vector<SDValue> &OutOps) override;
261 void emitSpecialCodeForMain();
263 inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
264 MVT VT, SDValue &Base, SDValue &Scale,
265 SDValue &Index, SDValue &Disp,
266 SDValue &Segment) {
267 if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
268 Base = CurDAG->getTargetFrameIndex(
269 AM.Base_FrameIndex, TLI->getPointerTy(CurDAG->getDataLayout()));
270 else if (AM.Base_Reg.getNode())
271 Base = AM.Base_Reg;
272 else
273 Base = CurDAG->getRegister(0, VT);
275 Scale = getI8Imm(AM.Scale, DL);
277 // Negate the index if needed.
278 if (AM.NegateIndex) {
279 unsigned NegOpc = VT == MVT::i64 ? X86::NEG64r : X86::NEG32r;
280 SDValue Neg = SDValue(CurDAG->getMachineNode(NegOpc, DL, VT, MVT::i32,
281 AM.IndexReg), 0);
282 AM.IndexReg = Neg;
285 if (AM.IndexReg.getNode())
286 Index = AM.IndexReg;
287 else
288 Index = CurDAG->getRegister(0, VT);
290 // These are 32-bit even in 64-bit mode since RIP-relative offset
291 // is 32-bit.
292 if (AM.GV)
293 Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
294 MVT::i32, AM.Disp,
295 AM.SymbolFlags);
296 else if (AM.CP)
297 Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
298 AM.Align, AM.Disp, AM.SymbolFlags);
299 else if (AM.ES) {
300 assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
301 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
302 } else if (AM.MCSym) {
303 assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
304 assert(AM.SymbolFlags == 0 && "oo");
305 Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
306 } else if (AM.JT != -1) {
307 assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
308 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
309 } else if (AM.BlockAddr)
310 Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
311 AM.SymbolFlags);
312 else
313 Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);
315 if (AM.Segment.getNode())
316 Segment = AM.Segment;
317 else
318 Segment = CurDAG->getRegister(0, MVT::i16);
321 // Utility function to determine whether we should avoid selecting
322 // immediate forms of instructions for better code size or not.
323 // At a high level, we'd like to avoid such instructions when
324 // we have similar constants used within the same basic block
325 // that can be kept in a register.
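// For example (sizes illustrative only): if 0x12345678 feeds both an add and
// a store in the same block, materializing it once with
//   movl $0x12345678, %ecx
//   addl %ecx, %eax
//   movl %ecx, (%rdx)
// avoids repeating the 4-byte immediate in each instruction.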
327 bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
328 uint32_t UseCount = 0;
330 // Do not want to hoist if we're not optimizing for size.
331 // TODO: We'd like to remove this restriction.
332 // See the comment in X86InstrInfo.td for more info.
333 if (!OptForSize)
334 return false;
336 // Walk all the users of the immediate.
337 for (SDNode::use_iterator UI = N->use_begin(),
338 UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {
340 SDNode *User = *UI;
342 // This user is already selected. Count it as a legitimate use and
343 // move on.
344 if (User->isMachineOpcode()) {
345 UseCount++;
346 continue;
349 // We want to count stores of immediates as real uses.
350 if (User->getOpcode() == ISD::STORE &&
351 User->getOperand(1).getNode() == N) {
352 UseCount++;
353 continue;
356 // We don't currently match users that have > 2 operands (except
357 // for stores, which are handled above).
358 // Those instructions won't match in ISEL, for now, and would
359 // be counted incorrectly.
360 // This may change in the future as we add additional instruction
361 // types.
362 if (User->getNumOperands() != 2)
363 continue;
365 // If this can match to INC/DEC, don't count it as a use.
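// (e.g. (add X, 1) and (add X, -1) can be selected as inc/dec, which encode
// no immediate byte, so keeping the constant in a register buys nothing.)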
366 if (User->getOpcode() == ISD::ADD &&
367 (isOneConstant(SDValue(N, 0)) || isAllOnesConstant(SDValue(N, 0))))
368 continue;
370 // Immediates that are used for offsets as part of stack
371 // manipulation should be left alone. These are typically
372 // used to indicate SP offsets for argument passing and
373 // will get pulled into stores/pushes (implicitly).
374 if (User->getOpcode() == X86ISD::ADD ||
375 User->getOpcode() == ISD::ADD ||
376 User->getOpcode() == X86ISD::SUB ||
377 User->getOpcode() == ISD::SUB) {
379 // Find the other operand of the add/sub.
380 SDValue OtherOp = User->getOperand(0);
381 if (OtherOp.getNode() == N)
382 OtherOp = User->getOperand(1);
384 // Don't count if the other operand is SP.
385 RegisterSDNode *RegNode;
386 if (OtherOp->getOpcode() == ISD::CopyFromReg &&
387 (RegNode = dyn_cast_or_null<RegisterSDNode>(
388 OtherOp->getOperand(1).getNode())))
389 if ((RegNode->getReg() == X86::ESP) ||
390 (RegNode->getReg() == X86::RSP))
391 continue;
394 // ... otherwise, count this and move on.
395 UseCount++;
398 // If we have more than 1 use, then recommend for hoisting.
399 return (UseCount > 1);
402 /// Return a target constant with the specified value of type i8.
403 inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
404 return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
407 /// Return a target constant with the specified value, of type i32.
408 inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
409 return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
412 /// Return a target constant with the specified value, of type i64.
413 inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
414 return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
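/// Convert the subvector index of an EXTRACT_SUBVECTOR into the immediate used
/// by the VEXTRACT instructions, e.g. extracting the elements starting at
/// index 4 of a v8i32 with a 128-bit VEXTRACT gives (4 * 32) / 128 = 1,
/// i.e. the upper half.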
417 SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
418 const SDLoc &DL) {
419 assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
420 uint64_t Index = N->getConstantOperandVal(1);
421 MVT VecVT = N->getOperand(0).getSimpleValueType();
422 return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
425 SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
426 const SDLoc &DL) {
427 assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
428 uint64_t Index = N->getConstantOperandVal(2);
429 MVT VecVT = N->getSimpleValueType(0);
430 return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
433 // Helper to detect unneeded and instructions on shift amounts. Called
434 // from PatFrags in tablegen.
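// e.g. for a 32-bit shift amount (Width == 5), (and X, 31) -- or any mask
// whose low 5 bits are all ones -- leaves the shift amount unchanged, so the
// AND can be dropped; the hardware masks the count anyway.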
435 bool isUnneededShiftMask(SDNode *N, unsigned Width) const {
436 assert(N->getOpcode() == ISD::AND && "Unexpected opcode");
437 const APInt &Val = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
439 if (Val.countTrailingOnes() >= Width)
440 return true;
442 APInt Mask = Val | CurDAG->computeKnownBits(N->getOperand(0)).Zero;
443 return Mask.countTrailingOnes() >= Width;
446 /// Return an SDNode that returns the value of the global base register.
447 /// Output instructions required to initialize the global base register,
448 /// if necessary.
449 SDNode *getGlobalBaseReg();
451 /// Return a reference to the TargetMachine, casted to the target-specific
452 /// type.
453 const X86TargetMachine &getTargetMachine() const {
454 return static_cast<const X86TargetMachine &>(TM);
457 /// Return a reference to the TargetInstrInfo, casted to the target-specific
458 /// type.
459 const X86InstrInfo *getInstrInfo() const {
460 return Subtarget->getInstrInfo();
463 /// Address-mode matching performs shift-of-and to and-of-shift
464 /// reassociation in order to expose more scaled addressing
465 /// opportunities.
466 bool ComplexPatternFuncMutatesDAG() const override {
467 return true;
470 bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;
472 /// Returns whether this is a relocatable immediate in the range
473 /// [-2^(Width-1) .. 2^(Width-1)-1], i.e. it fits in Width signed bits.
474 template <unsigned Width> bool isSExtRelocImm(SDNode *N) const {
475 if (auto *CN = dyn_cast<ConstantSDNode>(N))
476 return isInt<Width>(CN->getSExtValue());
477 return isSExtAbsoluteSymbolRef(Width, N);
480 // Indicates we should prefer to use a non-temporal load for this load.
481 bool useNonTemporalLoad(LoadSDNode *N) const {
482 if (!N->isNonTemporal())
483 return false;
485 unsigned StoreSize = N->getMemoryVT().getStoreSize();
487 if (N->getAlignment() < StoreSize)
488 return false;
490 switch (StoreSize) {
491 default: llvm_unreachable("Unsupported store size");
492 case 4:
493 case 8:
494 return false;
495 case 16:
496 return Subtarget->hasSSE41();
497 case 32:
498 return Subtarget->hasAVX2();
499 case 64:
500 return Subtarget->hasAVX512();
504 bool foldLoadStoreIntoMemOperand(SDNode *Node);
505 MachineSDNode *matchBEXTRFromAndImm(SDNode *Node);
506 bool matchBitExtract(SDNode *Node);
507 bool shrinkAndImmediate(SDNode *N);
508 bool isMaskZeroExtended(SDNode *N) const;
509 bool tryShiftAmountMod(SDNode *N);
510 bool combineIncDecVector(SDNode *Node);
511 bool tryShrinkShlLogicImm(SDNode *N);
512 bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);
514 MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
515 const SDLoc &dl, MVT VT, SDNode *Node);
516 MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
517 const SDLoc &dl, MVT VT, SDNode *Node,
518 SDValue &InFlag);
520 bool tryOptimizeRem8Extend(SDNode *N);
522 bool onlyUsesZeroFlag(SDValue Flags) const;
523 bool hasNoSignFlagUses(SDValue Flags) const;
524 bool hasNoCarryFlagUses(SDValue Flags) const;
529 // Returns true if this masked compare can be implemented legally with this
530 // type.
531 static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
532 unsigned Opcode = N->getOpcode();
533 if (Opcode == X86ISD::CMPM || Opcode == ISD::SETCC ||
534 Opcode == X86ISD::CMPM_SAE || Opcode == X86ISD::VFPCLASS) {
535 // We can get 256-bit 8 element types here without VLX being enabled. When
536 // this happens we will use 512-bit operations and the mask will not be
537 // zero extended.
538 EVT OpVT = N->getOperand(0).getValueType();
539 if (OpVT.is256BitVector() || OpVT.is128BitVector())
540 return Subtarget->hasVLX();
542 return true;
544 // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
545 if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
546 Opcode == X86ISD::FSETCCM_SAE)
547 return true;
549 return false;
552 // Returns true if we can assume the writer of the mask has zero extended it
553 // for us.
554 bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
555 // If this is an AND, check if we have a compare on either side. As long as
556 // one side guarantees the mask is zero extended, the AND will preserve those
557 // zeros.
558 if (N->getOpcode() == ISD::AND)
559 return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
560 isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);
562 return isLegalMaskCompare(N, Subtarget);
565 bool
566 X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
567 if (OptLevel == CodeGenOpt::None) return false;
569 if (!N.hasOneUse())
570 return false;
572 if (N.getOpcode() != ISD::LOAD)
573 return true;
575 // Don't fold non-temporal loads if we have an instruction for them.
576 if (useNonTemporalLoad(cast<LoadSDNode>(N)))
577 return false;
579 // If N is a load, do additional profitability checks.
580 if (U == Root) {
581 switch (U->getOpcode()) {
582 default: break;
583 case X86ISD::ADD:
584 case X86ISD::ADC:
585 case X86ISD::SUB:
586 case X86ISD::SBB:
587 case X86ISD::AND:
588 case X86ISD::XOR:
589 case X86ISD::OR:
590 case ISD::ADD:
591 case ISD::ADDCARRY:
592 case ISD::AND:
593 case ISD::OR:
594 case ISD::XOR: {
595 SDValue Op1 = U->getOperand(1);
597 // If the other operand is an 8-bit immediate we should fold the immediate
598 // instead. This reduces code size.
599 // e.g.
600 // movl 4(%esp), %eax
601 // addl $4, %eax
602 // vs.
603 // movl $4, %eax
604 // addl 4(%esp), %eax
605 // The former is 2 bytes shorter. In the case where the increment is 1,
606 // the saving can be 4 bytes (by using incl %eax).
607 if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
608 if (Imm->getAPIntValue().isSignedIntN(8))
609 return false;
611 // If this is a 64-bit AND with an immediate that fits in 32-bits,
612 // prefer using the smaller and over folding the load. This is needed to
613 // make sure immediates created by shrinkAndImmediate are always folded.
614 // Ideally we would narrow the load during DAG combine and get the
615 // best of both worlds.
616 if (U->getOpcode() == ISD::AND &&
617 Imm->getAPIntValue().getBitWidth() == 64 &&
618 Imm->getAPIntValue().isIntN(32))
619 return false;
621 // If this is really a zext_inreg that can be represented with a movzx
622 // instruction, prefer that.
623 // TODO: We could shrink the load and fold if it is non-volatile.
624 if (U->getOpcode() == ISD::AND &&
625 (Imm->getAPIntValue() == UINT8_MAX ||
626 Imm->getAPIntValue() == UINT16_MAX ||
627 Imm->getAPIntValue() == UINT32_MAX))
628 return false;
630 // ADD/SUB can negate the immediate and use the opposite operation
631 // to fit 128 into a sign-extended 8-bit immediate.
632 if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB) &&
633 (-Imm->getAPIntValue()).isSignedIntN(8))
634 return false;
637 // If the other operand is a TLS address, we should fold it instead.
638 // This produces
639 // movl %gs:0, %eax
640 // leal i@NTPOFF(%eax), %eax
641 // instead of
642 // movl $i@NTPOFF, %eax
643 // addl %gs:0, %eax
644 // if the block also has an access to a second TLS address this will save
645 // a load.
646 // FIXME: This is probably also true for non-TLS addresses.
647 if (Op1.getOpcode() == X86ISD::Wrapper) {
648 SDValue Val = Op1.getOperand(0);
649 if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
650 return false;
653 // Don't fold load if this matches the BTS/BTR/BTC patterns.
654 // BTS: (or X, (shl 1, n))
655 // BTR: (and X, (rotl -2, n))
656 // BTC: (xor X, (shl 1, n))
657 if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
658 if (U->getOperand(0).getOpcode() == ISD::SHL &&
659 isOneConstant(U->getOperand(0).getOperand(0)))
660 return false;
662 if (U->getOperand(1).getOpcode() == ISD::SHL &&
663 isOneConstant(U->getOperand(1).getOperand(0)))
664 return false;
666 if (U->getOpcode() == ISD::AND) {
667 SDValue U0 = U->getOperand(0);
668 SDValue U1 = U->getOperand(1);
669 if (U0.getOpcode() == ISD::ROTL) {
670 auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
671 if (C && C->getSExtValue() == -2)
672 return false;
675 if (U1.getOpcode() == ISD::ROTL) {
676 auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
677 if (C && C->getSExtValue() == -2)
678 return false;
682 break;
684 case ISD::SHL:
685 case ISD::SRA:
686 case ISD::SRL:
687 // Don't fold a load into a shift by immediate. The BMI2 instructions
688 // support folding a load, but not an immediate. The legacy instructions
689 // support folding an immediate, but can't fold a load. Folding an
690 // immediate is preferable to folding a load.
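// e.g. 'shll $3, %eax' encodes the count directly, whereas BMI2 'shlxl' takes
// the count only in a register (but can read the shifted operand from memory).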
691 if (isa<ConstantSDNode>(U->getOperand(1)))
692 return false;
694 break;
698 // Prevent folding a load if this can be implemented with an insert_subreg or
699 // a move that implicitly zeroes.
700 if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
701 isNullConstant(Root->getOperand(2)) &&
702 (Root->getOperand(0).isUndef() ||
703 ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
704 return false;
706 return true;
709 /// Replace the original chain operand of the call with
710 /// load's chain operand and move load below the call's chain operand.
711 static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
712 SDValue Call, SDValue OrigChain) {
713 SmallVector<SDValue, 8> Ops;
714 SDValue Chain = OrigChain.getOperand(0);
715 if (Chain.getNode() == Load.getNode())
716 Ops.push_back(Load.getOperand(0));
717 else {
718 assert(Chain.getOpcode() == ISD::TokenFactor &&
719 "Unexpected chain operand");
720 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
721 if (Chain.getOperand(i).getNode() == Load.getNode())
722 Ops.push_back(Load.getOperand(0));
723 else
724 Ops.push_back(Chain.getOperand(i));
725 SDValue NewChain =
726 CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
727 Ops.clear();
728 Ops.push_back(NewChain);
730 Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
731 CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
732 CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
733 Load.getOperand(1), Load.getOperand(2));
735 Ops.clear();
736 Ops.push_back(SDValue(Load.getNode(), 1));
737 Ops.append(Call->op_begin() + 1, Call->op_end());
738 CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
741 /// Return true if call address is a load and it can be
742 /// moved below CALLSEQ_START and the chains leading up to the call.
743 /// Return the CALLSEQ_START by reference as a second output.
744 /// In the case of a tail call, there isn't a callseq node between the call
745 /// chain and the load.
746 static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
747 // The transformation is somewhat dangerous if the call's chain was glued to
748 // the call. After MoveBelowOrigChain the load is moved between the call and
749 // the chain; this can create a cycle if the load is not folded. So it is
750 // *really* important that we are sure the load will be folded.
751 if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
752 return false;
753 LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
754 if (!LD ||
755 LD->isVolatile() ||
756 LD->getAddressingMode() != ISD::UNINDEXED ||
757 LD->getExtensionType() != ISD::NON_EXTLOAD)
758 return false;
760 // Now let's find the callseq_start.
761 while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
762 if (!Chain.hasOneUse())
763 return false;
764 Chain = Chain.getOperand(0);
767 if (!Chain.getNumOperands())
768 return false;
769 // Since we are not checking for AA here, conservatively abort if the chain
770 // writes to memory. It's not safe to move the callee (a load) across a store.
771 if (isa<MemSDNode>(Chain.getNode()) &&
772 cast<MemSDNode>(Chain.getNode())->writeMem())
773 return false;
774 if (Chain.getOperand(0).getNode() == Callee.getNode())
775 return true;
776 if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
777 Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
778 Callee.getValue(1).hasOneUse())
779 return true;
780 return false;
783 void X86DAGToDAGISel::PreprocessISelDAG() {
784 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
785 E = CurDAG->allnodes_end(); I != E; ) {
786 SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.
788 // If this is a target specific AND node with no flag usages, turn it back
789 // into ISD::AND to enable test instruction matching.
790 if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
791 SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
792 N->getOperand(0), N->getOperand(1));
793 --I;
794 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
795 ++I;
796 CurDAG->DeleteNode(N);
797 continue;
800 switch (N->getOpcode()) {
801 case ISD::FP_TO_SINT:
802 case ISD::FP_TO_UINT: {
803 // Replace vector fp_to_s/uint with their X86 specific equivalent so we
804 // don't need 2 sets of patterns.
805 if (!N->getSimpleValueType(0).isVector())
806 break;
808 unsigned NewOpc;
809 switch (N->getOpcode()) {
810 default: llvm_unreachable("Unexpected opcode!");
811 case ISD::FP_TO_SINT: NewOpc = X86ISD::CVTTP2SI; break;
812 case ISD::FP_TO_UINT: NewOpc = X86ISD::CVTTP2UI; break;
814 SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
815 N->getOperand(0));
816 --I;
817 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
818 ++I;
819 CurDAG->DeleteNode(N);
820 continue;
822 case ISD::SHL:
823 case ISD::SRA:
824 case ISD::SRL: {
825 // Replace vector shifts with their X86 specific equivalent so we don't
826 // need 2 sets of patterns.
827 if (!N->getValueType(0).isVector())
828 break;
830 unsigned NewOpc;
831 switch (N->getOpcode()) {
832 default: llvm_unreachable("Unexpected opcode!");
833 case ISD::SHL: NewOpc = X86ISD::VSHLV; break;
834 case ISD::SRA: NewOpc = X86ISD::VSRAV; break;
835 case ISD::SRL: NewOpc = X86ISD::VSRLV; break;
837 SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
838 N->getOperand(0), N->getOperand(1));
839 --I;
840 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
841 ++I;
842 CurDAG->DeleteNode(N);
843 continue;
845 case ISD::ANY_EXTEND:
846 case ISD::ANY_EXTEND_VECTOR_INREG: {
847 // Replace vector any extend with the zero extend equivalents so we don't
848 // need 2 sets of patterns. Ignore vXi1 extensions.
849 if (!N->getValueType(0).isVector() ||
850 N->getOperand(0).getScalarValueSizeInBits() == 1)
851 break;
853 unsigned NewOpc = N->getOpcode() == ISD::ANY_EXTEND
854 ? ISD::ZERO_EXTEND
855 : ISD::ZERO_EXTEND_VECTOR_INREG;
857 SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
858 N->getOperand(0));
859 --I;
860 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
861 ++I;
862 CurDAG->DeleteNode(N);
863 continue;
865 case ISD::FCEIL:
866 case ISD::FFLOOR:
867 case ISD::FTRUNC:
868 case ISD::FNEARBYINT:
869 case ISD::FRINT: {
870 // Replace fp rounding with their X86 specific equivalent so we don't
871 // need 2 sets of patterns.
872 unsigned Imm;
873 switch (N->getOpcode()) {
874 default: llvm_unreachable("Unexpected opcode!");
875 case ISD::FCEIL: Imm = 0xA; break;
876 case ISD::FFLOOR: Imm = 0x9; break;
877 case ISD::FTRUNC: Imm = 0xB; break;
878 case ISD::FNEARBYINT: Imm = 0xC; break;
879 case ISD::FRINT: Imm = 0x4; break;
881 SDLoc dl(N);
882 SDValue Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl,
883 N->getValueType(0),
884 N->getOperand(0),
885 CurDAG->getConstant(Imm, dl, MVT::i8));
886 --I;
887 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
888 ++I;
889 CurDAG->DeleteNode(N);
890 continue;
892 case X86ISD::FANDN:
893 case X86ISD::FAND:
894 case X86ISD::FOR:
895 case X86ISD::FXOR: {
896 // Widen scalar fp logic ops to vector to reduce isel patterns.
897 // FIXME: Can we do this during lowering/combine?
898 MVT VT = N->getSimpleValueType(0);
899 if (VT.isVector() || VT == MVT::f128)
900 break;
902 MVT VecVT = VT == MVT::f64 ? MVT::v2f64 : MVT::v4f32;
903 SDLoc dl(N);
904 SDValue Op0 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
905 N->getOperand(0));
906 SDValue Op1 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
907 N->getOperand(1));
909 SDValue Res;
910 if (Subtarget->hasSSE2()) {
911 EVT IntVT = EVT(VecVT).changeVectorElementTypeToInteger();
912 Op0 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op0);
913 Op1 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op1);
914 unsigned Opc;
915 switch (N->getOpcode()) {
916 default: llvm_unreachable("Unexpected opcode!");
917 case X86ISD::FANDN: Opc = X86ISD::ANDNP; break;
918 case X86ISD::FAND: Opc = ISD::AND; break;
919 case X86ISD::FOR: Opc = ISD::OR; break;
920 case X86ISD::FXOR: Opc = ISD::XOR; break;
922 Res = CurDAG->getNode(Opc, dl, IntVT, Op0, Op1);
923 Res = CurDAG->getNode(ISD::BITCAST, dl, VecVT, Res);
924 } else {
925 Res = CurDAG->getNode(N->getOpcode(), dl, VecVT, Op0, Op1);
927 Res = CurDAG->getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res,
928 CurDAG->getIntPtrConstant(0, dl));
929 --I;
930 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
931 ++I;
932 CurDAG->DeleteNode(N);
933 continue;
937 if (OptLevel != CodeGenOpt::None &&
938 // Only do this when the target can fold the load into the call or
939 // jmp.
940 !Subtarget->useRetpolineIndirectCalls() &&
941 ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
942 (N->getOpcode() == X86ISD::TC_RETURN &&
943 (Subtarget->is64Bit() ||
944 !getTargetMachine().isPositionIndependent())))) {
945 /// Also try moving call address load from outside callseq_start to just
946 /// before the call to allow it to be folded.
948 /// [Load chain]
949 /// ^
950 /// |
951 /// [Load]
952 /// ^ ^
953 /// | |
954 /// / \--
955 /// / |
956 ///[CALLSEQ_START] |
957 /// ^ |
958 /// | |
959 /// [LOAD/C2Reg] |
960 /// | |
961 /// \ /
962 /// \ /
963 /// [CALL]
964 bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
965 SDValue Chain = N->getOperand(0);
966 SDValue Load = N->getOperand(1);
967 if (!isCalleeLoad(Load, Chain, HasCallSeq))
968 continue;
969 moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
970 ++NumLoadMoved;
971 continue;
974 // Lower fpround and fpextend nodes that target the FP stack to be a store and
975 // load to the stack. This is a gross hack. We would like to simply mark
976 // these as being illegal, but when we do that, legalize produces these when
977 // it expands calls, then expands these in the same legalize pass. We would
978 // like dag combine to be able to hack on these between the call expansion
979 // and the node legalization. As such this pass basically does "really
980 // late" legalization of these inline with the X86 isel pass.
981 // FIXME: This should only happen when not compiled with -O0.
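// e.g. an FP_EXTEND from an SSE f32 to an x87 f80 becomes a 4-byte store to a
// stack temporary followed by an f32 extending load into the FP stack (see the
// MemVT selection below).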
982 switch (N->getOpcode()) {
983 default: continue;
984 case ISD::FP_ROUND:
985 case ISD::FP_EXTEND:
987 MVT SrcVT = N->getOperand(0).getSimpleValueType();
988 MVT DstVT = N->getSimpleValueType(0);
990 // If any of the sources are vectors, no fp stack involved.
991 if (SrcVT.isVector() || DstVT.isVector())
992 continue;
994 // If the source and destination are SSE registers, then this is a legal
995 // conversion that should not be lowered.
996 const X86TargetLowering *X86Lowering =
997 static_cast<const X86TargetLowering *>(TLI);
998 bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
999 bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
1000 if (SrcIsSSE && DstIsSSE)
1001 continue;
1003 if (!SrcIsSSE && !DstIsSSE) {
1004 // If this is an FPStack extension, it is a noop.
1005 if (N->getOpcode() == ISD::FP_EXTEND)
1006 continue;
1007 // If this is a value-preserving FPStack truncation, it is a noop.
1008 if (N->getConstantOperandVal(1))
1009 continue;
1012 // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
1013 // FPStack has extload and truncstore. SSE can fold direct loads into other
1014 // operations. Based on this, decide what we want to do.
1015 MVT MemVT;
1016 if (N->getOpcode() == ISD::FP_ROUND)
1017 MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
1018 else
1019 MemVT = SrcIsSSE ? SrcVT : DstVT;
1021 SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
1022 SDLoc dl(N);
1024 // FIXME: optimize the case where the src/dest is a load or store?
1026 SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl, N->getOperand(0),
1027 MemTmp, MachinePointerInfo(), MemVT);
1028 SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
1029 MachinePointerInfo(), MemVT);
1031 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
1032 // extload we created. This will cause general havoc on the dag because
1033 // anything below the conversion could be folded into other existing nodes.
1034 // To avoid invalidating 'I', back it up to the convert node.
1035 --I;
1036 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
1037 break;
1040 // The sequence of events for lowering STRICT_FP versions of these nodes requires
1041 // dealing with the chain differently, as there is already a preexisting chain.
1042 case ISD::STRICT_FP_ROUND:
1043 case ISD::STRICT_FP_EXTEND:
1045 MVT SrcVT = N->getOperand(1).getSimpleValueType();
1046 MVT DstVT = N->getSimpleValueType(0);
1048 // If any of the sources are vectors, no fp stack involved.
1049 if (SrcVT.isVector() || DstVT.isVector())
1050 continue;
1052 // If the source and destination are SSE registers, then this is a legal
1053 // conversion that should not be lowered.
1054 const X86TargetLowering *X86Lowering =
1055 static_cast<const X86TargetLowering *>(TLI);
1056 bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
1057 bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
1058 if (SrcIsSSE && DstIsSSE)
1059 continue;
1061 if (!SrcIsSSE && !DstIsSSE) {
1062 // If this is an FPStack extension, it is a noop.
1063 if (N->getOpcode() == ISD::STRICT_FP_EXTEND)
1064 continue;
1065 // If this is a value-preserving FPStack truncation, it is a noop.
1066 if (N->getConstantOperandVal(2))
1067 continue;
1070 // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
1071 // FPStack has extload and truncstore. SSE can fold direct loads into other
1072 // operations. Based on this, decide what we want to do.
1073 MVT MemVT;
1074 if (N->getOpcode() == ISD::STRICT_FP_ROUND)
1075 MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
1076 else
1077 MemVT = SrcIsSSE ? SrcVT : DstVT;
1079 SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
1080 SDLoc dl(N);
1082 // FIXME: optimize the case where the src/dest is a load or store?
1084 // Since the operation is StrictFP, use the preexisting chain.
1085 SDValue Store = CurDAG->getTruncStore(N->getOperand(0), dl, N->getOperand(1),
1086 MemTmp, MachinePointerInfo(), MemVT);
1087 SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
1088 MachinePointerInfo(), MemVT);
1090 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
1091 // extload we created. This will cause general havoc on the dag because
1092 // anything below the conversion could be folded into other existing nodes.
1093 // To avoid invalidating 'I', back it up to the convert node.
1094 --I;
1095 CurDAG->ReplaceAllUsesWith(N, Result.getNode());
1096 break;
1101 // Now that we did that, the node is dead. Increment the iterator to the
1102 // next node to process, then delete N.
1103 ++I;
1104 CurDAG->DeleteNode(N);
1107 // The load+call transform above can leave some dead nodes in the graph. Make
1108 // sure we remove them. It's possible some of the other transforms do too, so
1109 // just remove dead nodes unconditionally.
1110 CurDAG->RemoveDeadNodes();
1113 // Look for a redundant movzx/movsx that can occur after an 8-bit divrem.
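// If the byte was produced by extracting the low 8 bits of an earlier
// MOVZX/MOVSX32rr8_NOREX (as happens for AL/AH after an 8-bit divrem), the
// second extend is redundant and can reuse the first one.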
1114 bool X86DAGToDAGISel::tryOptimizeRem8Extend(SDNode *N) {
1115 unsigned Opc = N->getMachineOpcode();
1116 if (Opc != X86::MOVZX32rr8 && Opc != X86::MOVSX32rr8 &&
1117 Opc != X86::MOVSX64rr8)
1118 return false;
1120 SDValue N0 = N->getOperand(0);
1122 // We need to be extracting the lower byte of an extend.
1123 if (!N0.isMachineOpcode() ||
1124 N0.getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG ||
1125 N0.getConstantOperandVal(1) != X86::sub_8bit)
1126 return false;
1128 // We're looking for either a movsx or movzx to match the original opcode.
1129 unsigned ExpectedOpc = Opc == X86::MOVZX32rr8 ? X86::MOVZX32rr8_NOREX
1130 : X86::MOVSX32rr8_NOREX;
1131 SDValue N00 = N0.getOperand(0);
1132 if (!N00.isMachineOpcode() || N00.getMachineOpcode() != ExpectedOpc)
1133 return false;
1135 if (Opc == X86::MOVSX64rr8) {
1136 // If we had a sign extend from 8 to 64 bits, we still need to go from 32
1137 // to 64.
1138 MachineSDNode *Extend = CurDAG->getMachineNode(X86::MOVSX64rr32, SDLoc(N),
1139 MVT::i64, N00);
1140 ReplaceUses(N, Extend);
1141 } else {
1142 // Ok we can drop this extend and just use the original extend.
1143 ReplaceUses(N, N00.getNode());
1146 return true;
1149 void X86DAGToDAGISel::PostprocessISelDAG() {
1150 // Skip peepholes at -O0.
1151 if (TM.getOptLevel() == CodeGenOpt::None)
1152 return;
1154 SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
1156 bool MadeChange = false;
1157 while (Position != CurDAG->allnodes_begin()) {
1158 SDNode *N = &*--Position;
1159 // Skip dead nodes and any non-machine opcodes.
1160 if (N->use_empty() || !N->isMachineOpcode())
1161 continue;
1163 if (tryOptimizeRem8Extend(N)) {
1164 MadeChange = true;
1165 continue;
1168 // Look for a TESTrr+ANDrr pattern where both operands of the test are
1169 // the same. Rewrite to remove the AND.
1170 unsigned Opc = N->getMachineOpcode();
1171 if ((Opc == X86::TEST8rr || Opc == X86::TEST16rr ||
1172 Opc == X86::TEST32rr || Opc == X86::TEST64rr) &&
1173 N->getOperand(0) == N->getOperand(1) &&
1174 N->isOnlyUserOf(N->getOperand(0).getNode()) &&
1175 N->getOperand(0).isMachineOpcode()) {
1176 SDValue And = N->getOperand(0);
1177 unsigned N0Opc = And.getMachineOpcode();
1178 if (N0Opc == X86::AND8rr || N0Opc == X86::AND16rr ||
1179 N0Opc == X86::AND32rr || N0Opc == X86::AND64rr) {
1180 MachineSDNode *Test = CurDAG->getMachineNode(Opc, SDLoc(N),
1181 MVT::i32,
1182 And.getOperand(0),
1183 And.getOperand(1));
1184 ReplaceUses(N, Test);
1185 MadeChange = true;
1186 continue;
1188 if (N0Opc == X86::AND8rm || N0Opc == X86::AND16rm ||
1189 N0Opc == X86::AND32rm || N0Opc == X86::AND64rm) {
1190 unsigned NewOpc;
1191 switch (N0Opc) {
1192 case X86::AND8rm: NewOpc = X86::TEST8mr; break;
1193 case X86::AND16rm: NewOpc = X86::TEST16mr; break;
1194 case X86::AND32rm: NewOpc = X86::TEST32mr; break;
1195 case X86::AND64rm: NewOpc = X86::TEST64mr; break;
1198 // Need to swap the memory and register operand.
1199 SDValue Ops[] = { And.getOperand(1),
1200 And.getOperand(2),
1201 And.getOperand(3),
1202 And.getOperand(4),
1203 And.getOperand(5),
1204 And.getOperand(0),
1205 And.getOperand(6) /* Chain */ };
1206 MachineSDNode *Test = CurDAG->getMachineNode(NewOpc, SDLoc(N),
1207 MVT::i32, MVT::Other, Ops);
1208 ReplaceUses(N, Test);
1209 MadeChange = true;
1210 continue;
1214 // Look for a KAND+KORTEST and turn it into KTEST if only the zero flag is
1215 // used. We're doing this late so we can prefer to fold the AND into masked
1216 // comparisons. Doing that can be better for the live range of the mask
1217 // register.
1218 if ((Opc == X86::KORTESTBrr || Opc == X86::KORTESTWrr ||
1219 Opc == X86::KORTESTDrr || Opc == X86::KORTESTQrr) &&
1220 N->getOperand(0) == N->getOperand(1) &&
1221 N->isOnlyUserOf(N->getOperand(0).getNode()) &&
1222 N->getOperand(0).isMachineOpcode() &&
1223 onlyUsesZeroFlag(SDValue(N, 0))) {
1224 SDValue And = N->getOperand(0);
1225 unsigned N0Opc = And.getMachineOpcode();
1226 // KANDW is legal with AVX512F, but KTESTW requires AVX512DQ. The other
1227 // KAND instructions and KTEST use the same ISA feature.
1228 if (N0Opc == X86::KANDBrr ||
1229 (N0Opc == X86::KANDWrr && Subtarget->hasDQI()) ||
1230 N0Opc == X86::KANDDrr || N0Opc == X86::KANDQrr) {
1231 unsigned NewOpc;
1232 switch (Opc) {
1233 default: llvm_unreachable("Unexpected opcode!");
1234 case X86::KORTESTBrr: NewOpc = X86::KTESTBrr; break;
1235 case X86::KORTESTWrr: NewOpc = X86::KTESTWrr; break;
1236 case X86::KORTESTDrr: NewOpc = X86::KTESTDrr; break;
1237 case X86::KORTESTQrr: NewOpc = X86::KTESTQrr; break;
1239 MachineSDNode *KTest = CurDAG->getMachineNode(NewOpc, SDLoc(N),
1240 MVT::i32,
1241 And.getOperand(0),
1242 And.getOperand(1));
1243 ReplaceUses(N, KTest);
1244 MadeChange = true;
1245 continue;
1249 // Attempt to remove vector moves that were inserted to zero upper bits.
1250 if (Opc != TargetOpcode::SUBREG_TO_REG)
1251 continue;
1253 unsigned SubRegIdx = N->getConstantOperandVal(2);
1254 if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
1255 continue;
1257 SDValue Move = N->getOperand(1);
1258 if (!Move.isMachineOpcode())
1259 continue;
1261 // Make sure it's one of the move opcodes we recognize.
1262 switch (Move.getMachineOpcode()) {
1263 default:
1264 continue;
1265 case X86::VMOVAPDrr: case X86::VMOVUPDrr:
1266 case X86::VMOVAPSrr: case X86::VMOVUPSrr:
1267 case X86::VMOVDQArr: case X86::VMOVDQUrr:
1268 case X86::VMOVAPDYrr: case X86::VMOVUPDYrr:
1269 case X86::VMOVAPSYrr: case X86::VMOVUPSYrr:
1270 case X86::VMOVDQAYrr: case X86::VMOVDQUYrr:
1271 case X86::VMOVAPDZ128rr: case X86::VMOVUPDZ128rr:
1272 case X86::VMOVAPSZ128rr: case X86::VMOVUPSZ128rr:
1273 case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
1274 case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
1275 case X86::VMOVAPDZ256rr: case X86::VMOVUPDZ256rr:
1276 case X86::VMOVAPSZ256rr: case X86::VMOVUPSZ256rr:
1277 case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
1278 case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
1279 break;
1282 SDValue In = Move.getOperand(0);
1283 if (!In.isMachineOpcode() ||
1284 In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
1285 continue;
1287 // Make sure the instruction has a VEX, XOP, or EVEX prefix. This covers
1288 // the SHA instructions which use a legacy encoding.
1289 uint64_t TSFlags = getInstrInfo()->get(In.getMachineOpcode()).TSFlags;
1290 if ((TSFlags & X86II::EncodingMask) != X86II::VEX &&
1291 (TSFlags & X86II::EncodingMask) != X86II::EVEX &&
1292 (TSFlags & X86II::EncodingMask) != X86II::XOP)
1293 continue;
1295 // The producing instruction is another vector instruction. We can drop the
1296 // move.
1297 CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));
1298 MadeChange = true;
1301 if (MadeChange)
1302 CurDAG->RemoveDeadNodes();
1306 /// Emit any code that needs to be executed only in the main function.
1307 void X86DAGToDAGISel::emitSpecialCodeForMain() {
1308 if (Subtarget->isTargetCygMing()) {
1309 TargetLowering::ArgListTy Args;
1310 auto &DL = CurDAG->getDataLayout();
1312 TargetLowering::CallLoweringInfo CLI(*CurDAG);
1313 CLI.setChain(CurDAG->getRoot())
1314 .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
1315 CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
1316 std::move(Args));
1317 const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
1318 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
1319 CurDAG->setRoot(Result.second);
1323 void X86DAGToDAGISel::EmitFunctionEntryCode() {
1324 // If this is main, emit special code for main.
1325 const Function &F = MF->getFunction();
1326 if (F.hasExternalLinkage() && F.getName() == "main")
1327 emitSpecialCodeForMain();
1330 static bool isDispSafeForFrameIndex(int64_t Val) {
1331 // On 64-bit platforms, we can run into an issue where a frame index
1332 // includes a displacement that, when added to the explicit displacement,
1333 // will overflow the displacement field. Assuming that the frame index
1334 // displacement fits into a 31-bit integer (which is only slightly more
1335 // aggressive than the current fundamental assumption that it fits into
1336 // a 32-bit integer), a 31-bit disp should always be safe.
1337 return isInt<31>(Val);
1340 bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
1341 X86ISelAddressMode &AM) {
1342 // If there's no offset to fold, we don't need to do any work.
1343 if (Offset == 0)
1344 return false;
1346 // Cannot combine ExternalSymbol displacements with integer offsets.
1347 if (AM.ES || AM.MCSym)
1348 return true;
1350 int64_t Val = AM.Disp + Offset;
1351 CodeModel::Model M = TM.getCodeModel();
1352 if (Subtarget->is64Bit()) {
1353 if (!X86::isOffsetSuitableForCodeModel(Val, M,
1354 AM.hasSymbolicDisplacement()))
1355 return true;
1356 // In addition to the checks required for a register base, check that
1357 // we do not try to use an unsafe Disp with a frame index.
1358 if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
1359 !isDispSafeForFrameIndex(Val))
1360 return true;
1362 AM.Disp = Val;
1363 return false;
1367 bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
1368 SDValue Address = N->getOperand(1);
1370 // load gs:0 -> GS segment register.
1371 // load fs:0 -> FS segment register.
1373 // This optimization is valid because the GNU TLS model defines that
1374 // gs:0 (or fs:0 on X86-64) contains its own address.
1375 // For more information see http://people.redhat.com/drepper/tls.pdf
1376 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
1377 if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
1378 !IndirectTlsSegRefs &&
1379 (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
1380 Subtarget->isTargetFuchsia()))
1381 switch (N->getPointerInfo().getAddrSpace()) {
1382 case 256:
1383 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1384 return false;
1385 case 257:
1386 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1387 return false;
1388 // Address space 258 is not handled here, because it is not used to
1389 // address TLS areas.
1392 return true;
1395 /// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
1396 /// mode. These wrap things that will resolve down into a symbol reference.
1397 /// If no match is possible, this returns true, otherwise it returns false.
1398 bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
1399 // If the addressing mode already has a symbol as the displacement, we can
1400 // never match another symbol.
1401 if (AM.hasSymbolicDisplacement())
1402 return true;
1404 bool IsRIPRelTLS = false;
1405 bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
1406 if (IsRIPRel) {
1407 SDValue Val = N.getOperand(0);
1408 if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
1409 IsRIPRelTLS = true;
1412 // We can't use an addressing mode in the 64-bit large code model.
1413 // Global TLS addressing is an exception. In the medium code model,
1414 // we can use a mode when RIP wrappers are present.
1415 // That signifies access to globals that are known to be "near",
1416 // such as the GOT itself.
1417 CodeModel::Model M = TM.getCodeModel();
1418 if (Subtarget->is64Bit() &&
1419 ((M == CodeModel::Large && !IsRIPRelTLS) ||
1420 (M == CodeModel::Medium && !IsRIPRel)))
1421 return true;
1423 // Base and index reg must be 0 in order to use %rip as base.
1424 if (IsRIPRel && AM.hasBaseOrIndexReg())
1425 return true;
1427 // Make a local copy in case we can't do this fold.
1428 X86ISelAddressMode Backup = AM;
1430 int64_t Offset = 0;
1431 SDValue N0 = N.getOperand(0);
1432 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
1433 AM.GV = G->getGlobal();
1434 AM.SymbolFlags = G->getTargetFlags();
1435 Offset = G->getOffset();
1436 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
1437 AM.CP = CP->getConstVal();
1438 AM.Align = CP->getAlignment();
1439 AM.SymbolFlags = CP->getTargetFlags();
1440 Offset = CP->getOffset();
1441 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
1442 AM.ES = S->getSymbol();
1443 AM.SymbolFlags = S->getTargetFlags();
1444 } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
1445 AM.MCSym = S->getMCSymbol();
1446 } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
1447 AM.JT = J->getIndex();
1448 AM.SymbolFlags = J->getTargetFlags();
1449 } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
1450 AM.BlockAddr = BA->getBlockAddress();
1451 AM.SymbolFlags = BA->getTargetFlags();
1452 Offset = BA->getOffset();
1453 } else
1454 llvm_unreachable("Unhandled symbol reference node.");
1456 if (foldOffsetIntoAddress(Offset, AM)) {
1457 AM = Backup;
1458 return true;
1461 if (IsRIPRel)
1462 AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
1464 // Commit the changes now that we know this fold is safe.
1465 return false;
1468 /// Add the specified node to the specified addressing mode, returning true if
1469 /// it cannot be done. This just pattern matches for the addressing mode.
1470 bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
1471 if (matchAddressRecursively(N, AM, 0))
1472 return true;
1474 // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
1475 // a smaller encoding and avoids a scaled-index.
1476 if (AM.Scale == 2 &&
1477 AM.BaseType == X86ISelAddressMode::RegBase &&
1478 AM.Base_Reg.getNode() == nullptr) {
1479 AM.Base_Reg = AM.IndexReg;
1480 AM.Scale = 1;
1483 // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
1484 // because it has a smaller encoding.
1485 // TODO: Which other code models can use this?
1486 switch (TM.getCodeModel()) {
1487 default: break;
1488 case CodeModel::Small:
1489 case CodeModel::Kernel:
1490 if (Subtarget->is64Bit() &&
1491 AM.Scale == 1 &&
1492 AM.BaseType == X86ISelAddressMode::RegBase &&
1493 AM.Base_Reg.getNode() == nullptr &&
1494 AM.IndexReg.getNode() == nullptr &&
1495 AM.SymbolFlags == X86II::MO_NO_FLAG &&
1496 AM.hasSymbolicDisplacement())
1497 AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
1498 break;
1501 return false;
1504 bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
1505 unsigned Depth) {
1506 // Add an artificial use to this node so that we can keep track of
1507 // it if it gets CSE'd with a different node.
1508 HandleSDNode Handle(N);
1510 X86ISelAddressMode Backup = AM;
1511 if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
1512 !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
1513 return false;
1514 AM = Backup;
1516 // Try again after commuting the operands.
1517 if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1) &&
1518 !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
1519 return false;
1520 AM = Backup;
1522 // If we couldn't fold both operands into the address at the same time,
1523 // see if we can just put each operand into a register and fold at least
1524 // the add.
1525 if (AM.BaseType == X86ISelAddressMode::RegBase &&
1526 !AM.Base_Reg.getNode() &&
1527 !AM.IndexReg.getNode()) {
1528 N = Handle.getValue();
1529 AM.Base_Reg = N.getOperand(0);
1530 AM.IndexReg = N.getOperand(1);
1531 AM.Scale = 1;
1532 return false;
1534 N = Handle.getValue();
1535 return true;
1538 // Insert a node into the DAG at least before the Pos node's position. This
1539 // will reposition the node as needed, and will assign it a node ID that is <=
1540 // the Pos node's ID. Note that this does *not* preserve the uniqueness of node
1541 // IDs! The selection DAG must no longer depend on their uniqueness when this
1542 // is used.
1543 static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
1544 if (N->getNodeId() == -1 ||
1545 (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
1546 SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
1547 DAG.RepositionNode(Pos->getIterator(), N.getNode());
1548 // Mark Node as invalid for pruning as after this it may be a successor to a
1549 // selected node but otherwise be in the same position of Pos.
1550 // Conservatively mark it with the same -abs(Id) to assure node id
1551 // invariant is preserved.
1552 N->setNodeId(Pos->getNodeId());
1553 SelectionDAGISel::InvalidateNodeId(N.getNode());
1557 // Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
1558 // safe. This allows us to convert the shift and and into an h-register
1559 // extract and a scaled index. Returns false if the simplification is
1560 // performed.
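// e.g. with C1 == 2: (X >> 6) & 0x3fc becomes ((X >> 8) & 0xff) << 2, i.e. a
// movzbl of the h-register plus a scale-by-4 index.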
1561 static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
1562 uint64_t Mask,
1563 SDValue Shift, SDValue X,
1564 X86ISelAddressMode &AM) {
1565 if (Shift.getOpcode() != ISD::SRL ||
1566 !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1567 !Shift.hasOneUse())
1568 return true;
1570 int ScaleLog = 8 - Shift.getConstantOperandVal(1);
1571 if (ScaleLog <= 0 || ScaleLog >= 4 ||
1572 Mask != (0xffu << ScaleLog))
1573 return true;
1575 MVT VT = N.getSimpleValueType();
1576 SDLoc DL(N);
1577 SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
1578 SDValue NewMask = DAG.getConstant(0xff, DL, VT);
1579 SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
1580 SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
1581 SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
1582 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);
1584 // Insert the new nodes into the topological ordering. We must do this in
1585 // a valid topological ordering as nothing is going to go back and re-sort
1586 // these nodes. We continually insert before 'N' in sequence as this is
1587 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1588 // hierarchy left to express.
1589 insertDAGNode(DAG, N, Eight);
1590 insertDAGNode(DAG, N, Srl);
1591 insertDAGNode(DAG, N, NewMask);
1592 insertDAGNode(DAG, N, And);
1593 insertDAGNode(DAG, N, ShlCount);
1594 insertDAGNode(DAG, N, Shl);
1595 DAG.ReplaceAllUsesWith(N, Shl);
1596 DAG.RemoveDeadNode(N.getNode());
1597 AM.IndexReg = And;
1598 AM.Scale = (1 << ScaleLog);
1599 return false;
1602 // Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
1603 // allows us to fold the shift into this addressing mode. Returns false if the
1604 // transform succeeded.
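// e.g. (X << 3) & 0x7f8 becomes (X & 0xff) << 3, so the masked value can be
// used directly as an index with scale 8.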
1605 static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
1606 X86ISelAddressMode &AM) {
1607 SDValue Shift = N.getOperand(0);
1609 // Use a signed mask so that shifting right will insert sign bits. These
1610 // bits will be removed when we shift the result left so it doesn't matter
1611 // what we use. This might allow a smaller immediate encoding.
1612 int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
1614 // If we have an any_extend feeding the AND, look through it to see if there
1615 // is a shift behind it. But only if the AND doesn't use the extended bits.
1616 // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
1617 bool FoundAnyExtend = false;
1618 if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
1619 Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
1620 isUInt<32>(Mask)) {
1621 FoundAnyExtend = true;
1622 Shift = Shift.getOperand(0);
1625 if (Shift.getOpcode() != ISD::SHL ||
1626 !isa<ConstantSDNode>(Shift.getOperand(1)))
1627 return true;
1629 SDValue X = Shift.getOperand(0);
1631 // Not likely to be profitable if either the AND or SHIFT node has more
1632 // than one use (unless all uses are for address computation). Besides, the
1633 // isel mechanism requires their node ids to be reused.
1634 if (!N.hasOneUse() || !Shift.hasOneUse())
1635 return true;
1637 // Verify that the shift amount is something we can fold.
1638 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1639 if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
1640 return true;
1642 MVT VT = N.getSimpleValueType();
1643 SDLoc DL(N);
1644 if (FoundAnyExtend) {
1645 SDValue NewX = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X);
1646 insertDAGNode(DAG, N, NewX);
1647 X = NewX;
1650 SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
1651 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
1652 SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
1654 // Insert the new nodes into the topological ordering. We must do this in
1655 // a valid topological ordering as nothing is going to go back and re-sort
1656 // these nodes. We continually insert before 'N' in sequence as this is
1657 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1658 // hierarchy left to express.
1659 insertDAGNode(DAG, N, NewMask);
1660 insertDAGNode(DAG, N, NewAnd);
1661 insertDAGNode(DAG, N, NewShift);
1662 DAG.ReplaceAllUsesWith(N, NewShift);
1663 DAG.RemoveDeadNode(N.getNode());
1665 AM.Scale = 1 << ShiftAmt;
1666 AM.IndexReg = NewAnd;
1667 return false;
1670 // Implement some heroics to detect shifts of masked values where the mask can
1671 // be replaced by extending the shift and undoing that in the addressing mode
1672 // scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
1673 // (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
1674 // the addressing mode. This results in code such as:
1676 // int f(short *y, int *lookup_table) {
1677 // ...
1678 // return *y + lookup_table[*y >> 11];
1679 // }
1681 // Turning into:
1682 // movzwl (%rdi), %eax
1683 // movl %eax, %ecx
1684 // shrl $11, %ecx
1685 // addl (%rsi,%rcx,4), %eax
1687 // Instead of:
1688 // movzwl (%rdi), %eax
1689 // movl %eax, %ecx
1690 // shrl $9, %ecx
1691 // andl $124, %ecx
1692 // addl (%rsi,%rcx), %eax
1694 // Note that this function assumes the mask is provided as a mask *after* the
1695 // value is shifted. The input chain may or may not match that, but computing
1696 // such a mask is trivial.
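// Concretely, the example above reaches us as (and (srl x, 9), 0x7c); the
// mask 0x7c has two trailing zeros, so it becomes (srl x, 11) with the
// remaining shl of 2 folded into Scale == 4.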
1697 static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
1698 uint64_t Mask,
1699 SDValue Shift, SDValue X,
1700 X86ISelAddressMode &AM) {
1701 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
1702 !isa<ConstantSDNode>(Shift.getOperand(1)))
1703 return true;
1705 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1706 unsigned MaskLZ = countLeadingZeros(Mask);
1707 unsigned MaskTZ = countTrailingZeros(Mask);
1709 // The amount of shift we're trying to fit into the addressing mode is taken
1710 // from the trailing zeros of the mask.
1711 unsigned AMShiftAmt = MaskTZ;
1713 // There is nothing we can do here unless the mask is removing some bits.
1714 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
1715 if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;
1717 // We also need to ensure that mask is a continuous run of bits.
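// (Trailing ones of the shifted-down mask plus the mask's trailing and
// leading zero counts must account for all 64 bits.)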
1718 if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
1720 // Scale the leading zero count down based on the actual size of the value.
1721 // Also scale it down based on the size of the shift.
1722 unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
1723 if (MaskLZ < ScaleDown)
1724 return true;
1725 MaskLZ -= ScaleDown;
1727 // The final check is to ensure that any masked out high bits of X are
1728 // already known to be zero. Otherwise, the mask has a semantic impact
1729 // other than masking out a couple of low bits. Unfortunately, because of
1730 // the mask, zero extensions will be removed from operands in some cases.
1731 // This code works extra hard to look through extensions because we can
1732 // replace them with zero extensions cheaply if necessary.
1733 bool ReplacingAnyExtend = false;
1734 if (X.getOpcode() == ISD::ANY_EXTEND) {
1735 unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
1736 X.getOperand(0).getSimpleValueType().getSizeInBits();
1737 // Assume that we'll replace the any-extend with a zero-extend, and
1738 // narrow the search to the extended value.
1739 X = X.getOperand(0);
1740 MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
1741 ReplacingAnyExtend = true;
1743 APInt MaskedHighBits =
1744 APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
1745 KnownBits Known = DAG.computeKnownBits(X);
1746 if (MaskedHighBits != Known.Zero) return true;
1748 // We've identified a pattern that can be transformed into a single shift
1749 // and an addressing mode. Make it so.
1750 MVT VT = N.getSimpleValueType();
1751 if (ReplacingAnyExtend) {
1752 assert(X.getValueType() != VT);
1753 // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
1754 SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
1755 insertDAGNode(DAG, N, NewX);
1756 X = NewX;
1758 SDLoc DL(N);
1759 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
1760 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
1761 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
1762 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
1764 // Insert the new nodes into the topological ordering. We must do this in
1765 // a valid topological ordering as nothing is going to go back and re-sort
1766 // these nodes. We continually insert before 'N' in sequence as this is
1767 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1768 // hierarchy left to express.
1769 insertDAGNode(DAG, N, NewSRLAmt);
1770 insertDAGNode(DAG, N, NewSRL);
1771 insertDAGNode(DAG, N, NewSHLAmt);
1772 insertDAGNode(DAG, N, NewSHL);
1773 DAG.ReplaceAllUsesWith(N, NewSHL);
1774 DAG.RemoveDeadNode(N.getNode());
1776 AM.Scale = 1 << AMShiftAmt;
1777 AM.IndexReg = NewSRL;
1778 return false;
1781 // Transform "(X >> SHIFT) & (MASK << C1)" to
1782 // "((X >> (SHIFT + C1)) & (MASK)) << C1". Everything before the SHL will be
1783 // matched to a BEXTR later. Returns false if the simplification is performed.
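// For example, "(X >> 4) & (0xff << 2)" becomes "((X >> 6) & 0xff) << 2"; the
// SHL is folded into Scale == 4 and the remaining srl+and is the shape that
// matchBEXTRFromAndImm can turn into a BEXTR.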
1784 static bool foldMaskedShiftToBEXTR(SelectionDAG &DAG, SDValue N,
1785 uint64_t Mask,
1786 SDValue Shift, SDValue X,
1787 X86ISelAddressMode &AM,
1788 const X86Subtarget &Subtarget) {
1789 if (Shift.getOpcode() != ISD::SRL ||
1790 !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1791 !Shift.hasOneUse() || !N.hasOneUse())
1792 return true;
1794 // Only do this if BEXTR will be matched by matchBEXTRFromAndImm.
1795 if (!Subtarget.hasTBM() &&
1796 !(Subtarget.hasBMI() && Subtarget.hasFastBEXTR()))
1797 return true;
1799 // We need to ensure that mask is a continuous run of bits.
1800 if (!isShiftedMask_64(Mask)) return true;
1802 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1804 // The amount of shift we're trying to fit into the addressing mode is taken
1805 // from the trailing zeros of the mask.
1806 unsigned AMShiftAmt = countTrailingZeros(Mask);
1808 // There is nothing we can do here unless the mask is removing some bits.
1809 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
1810 if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;
1812 MVT VT = N.getSimpleValueType();
1813 SDLoc DL(N);
1814 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
1815 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
1816 SDValue NewMask = DAG.getConstant(Mask >> AMShiftAmt, DL, VT);
1817 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, NewSRL, NewMask);
1818 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
1819 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewAnd, NewSHLAmt);
1821 // Insert the new nodes into the topological ordering. We must do this in
1822 // a valid topological ordering as nothing is going to go back and re-sort
1823 // these nodes. We continually insert before 'N' in sequence as this is
1824 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1825 // hierarchy left to express.
1826 insertDAGNode(DAG, N, NewSRLAmt);
1827 insertDAGNode(DAG, N, NewSRL);
1828 insertDAGNode(DAG, N, NewMask);
1829 insertDAGNode(DAG, N, NewAnd);
1830 insertDAGNode(DAG, N, NewSHLAmt);
1831 insertDAGNode(DAG, N, NewSHL);
1832 DAG.ReplaceAllUsesWith(N, NewSHL);
1833 DAG.RemoveDeadNode(N.getNode());
1835 AM.Scale = 1 << AMShiftAmt;
1836 AM.IndexReg = NewAnd;
1837 return false;
1840 bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
1841 unsigned Depth) {
1842 SDLoc dl(N);
1843 LLVM_DEBUG({
1844 dbgs() << "MatchAddress: ";
1845 AM.dump(CurDAG);
1847 // Limit recursion.
1848 if (Depth > 5)
1849 return matchAddressBase(N, AM);
1851 // If this is already a %rip relative address, we can only merge immediates
1852 // into it. Instead of handling this in every case, we handle it here.
1853 // RIP relative addressing: %rip + 32-bit displacement!
1854 if (AM.isRIPRelative()) {
1855 // FIXME: JumpTable and ExternalSymbol address currently don't like
1856 // displacements. It isn't very important, but this should be fixed for
1857 // consistency.
1858 if (!(AM.ES || AM.MCSym) && AM.JT != -1)
1859 return true;
1861 if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
1862 if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
1863 return false;
1864 return true;
1867 switch (N.getOpcode()) {
1868 default: break;
1869 case ISD::LOCAL_RECOVER: {
1870 if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
1871 if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
1872 // Use the symbol and don't prefix it.
1873 AM.MCSym = ESNode->getMCSymbol();
1874 return false;
1876 break;
1878 case ISD::Constant: {
1879 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
1880 if (!foldOffsetIntoAddress(Val, AM))
1881 return false;
1882 break;
1885 case X86ISD::Wrapper:
1886 case X86ISD::WrapperRIP:
1887 if (!matchWrapper(N, AM))
1888 return false;
1889 break;
1891 case ISD::LOAD:
1892 if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
1893 return false;
1894 break;
1896 case ISD::FrameIndex:
1897 if (AM.BaseType == X86ISelAddressMode::RegBase &&
1898 AM.Base_Reg.getNode() == nullptr &&
1899 (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
1900 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
1901 AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
1902 return false;
1904 break;
1906 case ISD::SHL:
1907 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
1908 break;
1910 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1911 unsigned Val = CN->getZExtValue();
1912 // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
1913 // that the base operand remains free for further matching. If
1914 // the base doesn't end up getting used, a post-processing step
1915 // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
1916 if (Val == 1 || Val == 2 || Val == 3) {
1917 AM.Scale = 1 << Val;
1918 SDValue ShVal = N.getOperand(0);
1920 // Okay, we know that we have a scale by now. However, if the scaled
1921 // value is an add of something and a constant, we can fold the
1922 // constant into the disp field here.
1923 if (CurDAG->isBaseWithConstantOffset(ShVal)) {
1924 AM.IndexReg = ShVal.getOperand(0);
1925 ConstantSDNode *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1));
1926 uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
1927 if (!foldOffsetIntoAddress(Disp, AM))
1928 return false;
1931 AM.IndexReg = ShVal;
1932 return false;
1935 break;
1937 case ISD::SRL: {
1938 // Scale must not be used already.
1939 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
1941 // We only handle up to 64-bit values here as those are what matter for
1942 // addressing mode optimizations.
1943 assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
1944 "Unexpected value size!");
1946 SDValue And = N.getOperand(0);
1947 if (And.getOpcode() != ISD::AND) break;
1948 SDValue X = And.getOperand(0);
1950 // The mask used for the transform is expected to be post-shift, but we
1951 // found the shift first so just apply the shift to the mask before passing
1952 // it down.
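// For example, for (srl (and X, 0xff0), 2) the mask passed down to the
// helper is 0xff0 >> 2 == 0x3fc.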
1953 if (!isa<ConstantSDNode>(N.getOperand(1)) ||
1954 !isa<ConstantSDNode>(And.getOperand(1)))
1955 break;
1956 uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
1958 // Try to fold the mask and shift into the scale, and return false if we
1959 // succeed.
1960 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
1961 return false;
1962 break;
1965 case ISD::SMUL_LOHI:
1966 case ISD::UMUL_LOHI:
1967 // A mul_lohi where we need the low part can be folded as a plain multiply.
1968 if (N.getResNo() != 0) break;
1969 LLVM_FALLTHROUGH;
1970 case ISD::MUL:
1971 case X86ISD::MUL_IMM:
1972 // X*[3,5,9] -> X+X*[2,4,8]
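// e.g. X*5 can be selected as "lea (%reg,%reg,4)", using the same register
// as both base and index.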
1973 if (AM.BaseType == X86ISelAddressMode::RegBase &&
1974 AM.Base_Reg.getNode() == nullptr &&
1975 AM.IndexReg.getNode() == nullptr) {
1976 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
1977 if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
1978 CN->getZExtValue() == 9) {
1979 AM.Scale = unsigned(CN->getZExtValue())-1;
1981 SDValue MulVal = N.getOperand(0);
1982 SDValue Reg;
1984 // Okay, we know that we have a scale by now. However, if the scaled
1985 // value is an add of something and a constant, we can fold the
1986 // constant into the disp field here.
1987 if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
1988 isa<ConstantSDNode>(MulVal.getOperand(1))) {
1989 Reg = MulVal.getOperand(0);
1990 ConstantSDNode *AddVal =
1991 cast<ConstantSDNode>(MulVal.getOperand(1));
1992 uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
1993 if (foldOffsetIntoAddress(Disp, AM))
1994 Reg = N.getOperand(0);
1995 } else {
1996 Reg = N.getOperand(0);
1999 AM.IndexReg = AM.Base_Reg = Reg;
2000 return false;
2003 break;
2005 case ISD::SUB: {
2006 // Given A-B, if A can be completely folded into the address (leaving the
2007 // index field unused), use -B as the index. This is a win if A has
2008 // multiple parts that can be folded into the address. It also saves a mov
2009 // if the base register has other uses, since it avoids a two-address sub
2010 // instruction; however, it costs an additional mov if the index register
2011 // has other uses.
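// For example, if A is a global plus a constant, both pieces fold into the
// symbol/displacement fields, and the address can then be formed by an LEA
// over GV+Disp with the negated B as a scale-1 index (the NEG itself is
// emitted later).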
2013 // Add an artificial use to this node so that we can keep track of
2014 // it if it gets CSE'd with a different node.
2015 HandleSDNode Handle(N);
2017 // Test if the LHS of the sub can be folded.
2018 X86ISelAddressMode Backup = AM;
2019 if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
2020 N = Handle.getValue();
2021 AM = Backup;
2022 break;
2024 N = Handle.getValue();
2025 // Test if the index field is free for use.
2026 if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
2027 AM = Backup;
2028 break;
2031 int Cost = 0;
2032 SDValue RHS = N.getOperand(1);
2033 // If the RHS involves a register with multiple uses, this
2034 // transformation incurs an extra mov, due to the neg instruction
2035 // clobbering its operand.
2036 if (!RHS.getNode()->hasOneUse() ||
2037 RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
2038 RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
2039 RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
2040 (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
2041 RHS.getOperand(0).getValueType() == MVT::i32))
2042 ++Cost;
2043 // If the base is a register with multiple uses, this
2044 // transformation may save a mov.
2045 if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
2046 !AM.Base_Reg.getNode()->hasOneUse()) ||
2047 AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2048 --Cost;
2049 // If the folded LHS was interesting, this transformation saves
2050 // address arithmetic.
2051 if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
2052 ((AM.Disp != 0) && (Backup.Disp == 0)) +
2053 (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
2054 --Cost;
2055 // If it doesn't look like it may be an overall win, don't do it.
2056 if (Cost >= 0) {
2057 AM = Backup;
2058 break;
2061 // Ok, the transformation is legal and appears profitable. Go for it.
2062 // Negation will be emitted later to avoid creating dangling nodes if this
2063 // was an unprofitable LEA.
2064 AM.IndexReg = RHS;
2065 AM.NegateIndex = true;
2066 AM.Scale = 1;
2067 return false;
2070 case ISD::ADD:
2071 if (!matchAdd(N, AM, Depth))
2072 return false;
2073 break;
2075 case ISD::OR:
2076 // We want to look through a transform in InstCombine and DAGCombiner that
2077 // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
2078 // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
2079 // An 'lea' can then be used to match the shift (multiply) and add:
2080 // and $1, %esi
2081 // lea (%rsi, %rdi, 8), %rax
2082 if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
2083 !matchAdd(N, AM, Depth))
2084 return false;
2085 break;
2087 case ISD::AND: {
2088 // Perform some heroic transforms on an and of a constant-count shift
2089 // with a constant to enable use of the scaled offset field.
2091 // Scale must not be used already.
2092 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2094 // We only handle up to 64-bit values here as those are what matter for
2095 // addressing mode optimizations.
2096 assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2097 "Unexpected value size!");
2099 if (!isa<ConstantSDNode>(N.getOperand(1)))
2100 break;
2102 if (N.getOperand(0).getOpcode() == ISD::SRL) {
2103 SDValue Shift = N.getOperand(0);
2104 SDValue X = Shift.getOperand(0);
2106 uint64_t Mask = N.getConstantOperandVal(1);
2108 // Try to fold the mask and shift into an extract and scale.
2109 if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
2110 return false;
2112 // Try to fold the mask and shift directly into the scale.
2113 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
2114 return false;
2116 // Try to fold the mask and shift into BEXTR and scale.
2117 if (!foldMaskedShiftToBEXTR(*CurDAG, N, Mask, Shift, X, AM, *Subtarget))
2118 return false;
2121 // Try to swap the mask and shift to place shifts which can be done as
2122 // a scale on the outside of the mask.
2123 if (!foldMaskedShiftToScaledMask(*CurDAG, N, AM))
2124 return false;
2126 break;
2128 case ISD::ZERO_EXTEND: {
2129 // Try to widen a zexted shift left to the same size as its use, so we can
2130 // match the shift as a scale factor.
2131 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2132 break;
2133 if (N.getOperand(0).getOpcode() != ISD::SHL || !N.getOperand(0).hasOneUse())
2134 break;
2136 // Give up if the shift is not a valid scale factor [1,2,3].
2137 SDValue Shl = N.getOperand(0);
2138 auto *ShAmtC = dyn_cast<ConstantSDNode>(Shl.getOperand(1));
2139 if (!ShAmtC || ShAmtC->getZExtValue() > 3)
2140 break;
2142 // The narrow shift must only shift out zero bits (it must be 'nuw').
2143 // That makes it safe to widen to the destination type.
2144 APInt HighZeros = APInt::getHighBitsSet(Shl.getValueSizeInBits(),
2145 ShAmtC->getZExtValue());
2146 if (!CurDAG->MaskedValueIsZero(Shl.getOperand(0), HighZeros))
2147 break;
2149 // zext (shl nuw i8 %x, C) to i32 --> shl (zext i8 %x to i32), (zext C)
2150 MVT VT = N.getSimpleValueType();
2151 SDLoc DL(N);
2152 SDValue Zext = CurDAG->getNode(ISD::ZERO_EXTEND, DL, VT, Shl.getOperand(0));
2153 SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, Shl.getOperand(1));
2155 // Convert the shift to scale factor.
2156 AM.Scale = 1 << ShAmtC->getZExtValue();
2157 AM.IndexReg = Zext;
2159 insertDAGNode(*CurDAG, N, Zext);
2160 insertDAGNode(*CurDAG, N, NewShl);
2161 CurDAG->ReplaceAllUsesWith(N, NewShl);
2162 CurDAG->RemoveDeadNode(N.getNode());
2163 return false;
2167 return matchAddressBase(N, AM);
2170 /// Helper for MatchAddress. Add the specified node to the
2171 /// specified addressing mode without any further recursion.
2172 bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
2173 // Is the base register already occupied?
2174 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
2175 // If so, check to see if the scale index register is set.
2176 if (!AM.IndexReg.getNode()) {
2177 AM.IndexReg = N;
2178 AM.Scale = 1;
2179 return false;
2182 // Otherwise, we cannot select it.
2183 return true;
2186 // Default, generate it as a register.
2187 AM.BaseType = X86ISelAddressMode::RegBase;
2188 AM.Base_Reg = N;
2189 return false;
2192 /// Helper for selectVectorAddr. Handles things that can be folded into a
2193 /// gather scatter address. The index register and scale should have already
2194 /// been handled.
2195 bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) {
2196 // TODO: Support other operations.
2197 switch (N.getOpcode()) {
2198 case ISD::Constant: {
2199 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2200 if (!foldOffsetIntoAddress(Val, AM))
2201 return false;
2202 break;
2204 case X86ISD::Wrapper:
2205 if (!matchWrapper(N, AM))
2206 return false;
2207 break;
2210 return matchAddressBase(N, AM);
2213 bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
2214 SDValue &Scale, SDValue &Index,
2215 SDValue &Disp, SDValue &Segment) {
2216 X86ISelAddressMode AM;
2217 auto *Mgs = cast<X86MaskedGatherScatterSDNode>(Parent);
2218 AM.IndexReg = Mgs->getIndex();
2219 AM.Scale = cast<ConstantSDNode>(Mgs->getScale())->getZExtValue();
2221 unsigned AddrSpace = cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
2222 // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
2223 if (AddrSpace == 256)
2224 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2225 if (AddrSpace == 257)
2226 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2227 if (AddrSpace == 258)
2228 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2230 SDLoc DL(N);
2231 MVT VT = N.getSimpleValueType();
2233 // Try to match into the base and displacement fields.
2234 if (matchVectorAddress(N, AM))
2235 return false;
2237 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2238 return true;
2241 /// Returns true if it is able to pattern match an addressing mode.
2242 /// It returns the operands which make up the maximal addressing mode it can
2243 /// match by reference.
2245 /// Parent is the parent node of the addr operand that is being matched. It
2246 /// is always a load, store, atomic node, or null. It is only null when
2247 /// checking memory operands for inline asm nodes.
2248 bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
2249 SDValue &Scale, SDValue &Index,
2250 SDValue &Disp, SDValue &Segment) {
2251 X86ISelAddressMode AM;
2253 if (Parent &&
2254 // This list of opcodes are all the nodes that have an "addr:$ptr" operand
2255 // that are not a MemSDNode, and thus don't have proper addrspace info.
2256 Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
2257 Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
2258 Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
2259 Parent->getOpcode() != X86ISD::ENQCMD && // Fixme
2260 Parent->getOpcode() != X86ISD::ENQCMDS && // Fixme
2261 Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
2262 Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
2263 unsigned AddrSpace =
2264 cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
2265 // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
2266 if (AddrSpace == 256)
2267 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2268 if (AddrSpace == 257)
2269 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2270 if (AddrSpace == 258)
2271 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2274 // Save the DL and VT before calling matchAddress, it can invalidate N.
2275 SDLoc DL(N);
2276 MVT VT = N.getSimpleValueType();
2278 if (matchAddress(N, AM))
2279 return false;
2281 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2282 return true;
2285 // We can only fold a load if all nodes between it and the root node have a
2286 // single use. If there are additional uses, we could end up duplicating the
2287 // load.
2288 static bool hasSingleUsesFromRoot(SDNode *Root, SDNode *User) {
2289 while (User != Root) {
2290 if (!User->hasOneUse())
2291 return false;
2292 User = *User->use_begin();
2295 return true;
2298 /// Match a scalar SSE load. In particular, we want to match a load whose top
2299 /// elements are either undef or zeros. The load flavor is derived from the
2300 /// type of N, which is either v4f32 or v2f64.
2302 /// We also return:
2303 /// PatternNodeWithChain: this is the matched node that has a chain input and
2304 /// output.
2305 bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root, SDNode *Parent,
2306 SDValue N, SDValue &Base,
2307 SDValue &Scale, SDValue &Index,
2308 SDValue &Disp, SDValue &Segment,
2309 SDValue &PatternNodeWithChain) {
2310 if (!hasSingleUsesFromRoot(Root, Parent))
2311 return false;
2313 // We can allow a full vector load here since narrowing a load is ok unless
2314 // it's volatile.
2315 if (ISD::isNON_EXTLoad(N.getNode())) {
2316 LoadSDNode *LD = cast<LoadSDNode>(N);
2317 if (!LD->isVolatile() &&
2318 IsProfitableToFold(N, LD, Root) &&
2319 IsLegalToFold(N, Parent, Root, OptLevel)) {
2320 PatternNodeWithChain = N;
2321 return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
2322 Segment);
2326 // We can also match the special zero extended load opcode.
2327 if (N.getOpcode() == X86ISD::VZEXT_LOAD) {
2328 PatternNodeWithChain = N;
2329 if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
2330 IsLegalToFold(PatternNodeWithChain, Parent, Root, OptLevel)) {
2331 auto *MI = cast<MemIntrinsicSDNode>(PatternNodeWithChain);
2332 return selectAddr(MI, MI->getBasePtr(), Base, Scale, Index, Disp,
2333 Segment);
2337 // Need to make sure that the SCALAR_TO_VECTOR and load are both only used
2338 // once. Otherwise the load might get duplicated and the chain output of the
2339 // duplicate load will not be observed by all dependencies.
2340 if (N.getOpcode() == ISD::SCALAR_TO_VECTOR && N.getNode()->hasOneUse()) {
2341 PatternNodeWithChain = N.getOperand(0);
2342 if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
2343 IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
2344 IsLegalToFold(PatternNodeWithChain, N.getNode(), Root, OptLevel)) {
2345 LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
2346 return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
2347 Segment);
2351 return false;
2355 bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
2356 if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2357 uint64_t ImmVal = CN->getZExtValue();
2358 if (!isUInt<32>(ImmVal))
2359 return false;
2361 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i64);
2362 return true;
2365 // In static codegen with small code model, we can get the address of a label
2366 // into a register with 'movl'
2367 if (N->getOpcode() != X86ISD::Wrapper)
2368 return false;
2370 N = N.getOperand(0);
2372 // At least GNU as does not accept 'movl' for TPOFF relocations.
2373 // FIXME: We could use 'movl' when we know we are targeting MC.
2374 if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
2375 return false;
2377 Imm = N;
2378 if (N->getOpcode() != ISD::TargetGlobalAddress)
2379 return TM.getCodeModel() == CodeModel::Small;
2381 Optional<ConstantRange> CR =
2382 cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
2383 if (!CR)
2384 return TM.getCodeModel() == CodeModel::Small;
2386 return CR->getUnsignedMax().ult(1ull << 32);
2389 bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
2390 SDValue &Scale, SDValue &Index,
2391 SDValue &Disp, SDValue &Segment) {
2392 // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
2393 SDLoc DL(N);
2395 if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
2396 return false;
2398 RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
2399 if (RN && RN->getReg() == 0)
2400 Base = CurDAG->getRegister(0, MVT::i64);
2401 else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
2402 // Base could already be %rip, particularly in the x32 ABI.
2403 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2404 MVT::i64), 0);
2405 Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2406 Base);
2409 RN = dyn_cast<RegisterSDNode>(Index);
2410 if (RN && RN->getReg() == 0)
2411 Index = CurDAG->getRegister(0, MVT::i64);
2412 else {
2413 assert(Index.getValueType() == MVT::i32 &&
2414 "Expect to be extending 32-bit registers for use in LEA");
2415 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2416 MVT::i64), 0);
2417 Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2418 Index);
2421 return true;
2424 /// Calls SelectAddr and determines if the maximal addressing
2425 /// mode it matches can be cost effectively emitted as an LEA instruction.
2426 bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
2427 SDValue &Base, SDValue &Scale,
2428 SDValue &Index, SDValue &Disp,
2429 SDValue &Segment) {
2430 X86ISelAddressMode AM;
2432 // Save the DL and VT before calling matchAddress, it can invalidate N.
2433 SDLoc DL(N);
2434 MVT VT = N.getSimpleValueType();
2436 // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
2437 // segments.
2438 SDValue Copy = AM.Segment;
2439 SDValue T = CurDAG->getRegister(0, MVT::i32);
2440 AM.Segment = T;
2441 if (matchAddress(N, AM))
2442 return false;
2443 assert (T == AM.Segment);
2444 AM.Segment = Copy;
2446 unsigned Complexity = 0;
2447 if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode())
2448 Complexity = 1;
2449 else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2450 Complexity = 4;
2452 if (AM.IndexReg.getNode())
2453 Complexity++;
2455 // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
2456 // a simple shift.
2457 if (AM.Scale > 1)
2458 Complexity++;
2460 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
2461 // to a LEA. This is determined with some experimentation but is by no means
2462 // optimal (especially for code size consideration). LEA is nice because of
2463 // its three-address nature. Tweak the cost function again when we can run
2464 // convertToThreeAddress() at register allocation time.
2465 if (AM.hasSymbolicDisplacement()) {
2466 // For X86-64, always use LEA to materialize RIP-relative addresses.
2467 if (Subtarget->is64Bit())
2468 Complexity = 4;
2469 else
2470 Complexity += 2;
2473 // Heuristic: try harder to form an LEA from ADD if the operands set flags.
2474 // Unlike ADD, LEA does not affect flags, so we will be less likely to require
2475 // duplicating flag-producing instructions later in the pipeline.
2476 if (N.getOpcode() == ISD::ADD) {
2477 auto isMathWithFlags = [](SDValue V) {
2478 switch (V.getOpcode()) {
2479 case X86ISD::ADD:
2480 case X86ISD::SUB:
2481 case X86ISD::ADC:
2482 case X86ISD::SBB:
2483 /* TODO: These opcodes can be added safely, but we may want to justify
2484 their inclusion for different reasons (better for reg-alloc).
2485 case X86ISD::SMUL:
2486 case X86ISD::UMUL:
2487 case X86ISD::OR:
2488 case X86ISD::XOR:
2489 case X86ISD::AND:
2491 // Value 1 is the flag output of the node - verify it's not dead.
2492 return !SDValue(V.getNode(), 1).use_empty();
2493 default:
2494 return false;
2497 // TODO: This could be an 'or' rather than 'and' to make the transform more
2498 // likely to happen. We might want to factor in whether there's a
2499 // load folding opportunity for the math op that disappears with LEA.
2500 if (isMathWithFlags(N.getOperand(0)) && isMathWithFlags(N.getOperand(1)))
2501 Complexity++;
2504 if (AM.Disp)
2505 Complexity++;
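// At this point Complexity roughly counts how many address components
// (base, index, scale, displacement/symbol) the LEA would fold; the check
// below only keeps the LEA when it folds enough work to beat plain ALU ops.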
2507 // If it isn't worth using an LEA, reject it.
2508 if (Complexity <= 2)
2509 return false;
2511 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2512 return true;
2515 /// This is only run on TargetGlobalTLSAddress nodes.
2516 bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
2517 SDValue &Scale, SDValue &Index,
2518 SDValue &Disp, SDValue &Segment) {
2519 assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
2520 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
2522 X86ISelAddressMode AM;
2523 AM.GV = GA->getGlobal();
2524 AM.Disp += GA->getOffset();
2525 AM.SymbolFlags = GA->getTargetFlags();
2527 MVT VT = N.getSimpleValueType();
2528 if (VT == MVT::i32) {
2529 AM.Scale = 1;
2530 AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
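// (EBX is used here because the 32-bit TLS access sequence, e.g. the ELF
// general-dynamic form "leal x@tlsgd(,%ebx,1), %eax", expects the GOT base
// to be in EBX.)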
2533 getAddressOperands(AM, SDLoc(N), VT, Base, Scale, Index, Disp, Segment);
2534 return true;
2537 bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
2538 if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
2539 Op = CurDAG->getTargetConstant(CN->getAPIntValue(), SDLoc(CN),
2540 N.getValueType());
2541 return true;
2544 // Keep track of the original value type and whether this value was
2545 // truncated. If we see a truncation from pointer type to VT that truncates
2546 // bits that are known to be zero, we can use a narrow reference.
2547 EVT VT = N.getValueType();
2548 bool WasTruncated = false;
2549 if (N.getOpcode() == ISD::TRUNCATE) {
2550 WasTruncated = true;
2551 N = N.getOperand(0);
2554 if (N.getOpcode() != X86ISD::Wrapper)
2555 return false;
2557 // We can only use non-GlobalValues as immediates if they were not truncated,
2558 // as we do not have any range information. If we have a GlobalValue and the
2559 // address was not truncated, we can select it as an operand directly.
2560 unsigned Opc = N.getOperand(0)->getOpcode();
2561 if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
2562 Op = N.getOperand(0);
2563 // We can only select the operand directly if we didn't have to look past a
2564 // truncate.
2565 return !WasTruncated;
2568 // Check that the global's range fits into VT.
2569 auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
2570 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2571 if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
2572 return false;
2574 // Okay, we can use a narrow reference.
2575 Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
2576 GA->getOffset(), GA->getTargetFlags());
2577 return true;
2580 bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
2581 SDValue &Base, SDValue &Scale,
2582 SDValue &Index, SDValue &Disp,
2583 SDValue &Segment) {
2584 if (!ISD::isNON_EXTLoad(N.getNode()) ||
2585 !IsProfitableToFold(N, P, Root) ||
2586 !IsLegalToFold(N, P, Root, OptLevel))
2587 return false;
2589 return selectAddr(N.getNode(),
2590 N.getOperand(1), Base, Scale, Index, Disp, Segment);
2593 /// Return an SDNode that returns the value of the global base register.
2594 /// Output instructions required to initialize the global base register,
2595 /// if necessary.
2596 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
2597 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
2598 auto &DL = MF->getDataLayout();
2599 return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
2602 bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
2603 if (N->getOpcode() == ISD::TRUNCATE)
2604 N = N->getOperand(0).getNode();
2605 if (N->getOpcode() != X86ISD::Wrapper)
2606 return false;
2608 auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
2609 if (!GA)
2610 return false;
2612 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2613 return CR && CR->getSignedMin().sge(-1ull << Width) &&
2614 CR->getSignedMax().slt(1ull << Width);
2617 static X86::CondCode getCondFromNode(SDNode *N) {
2618 assert(N->isMachineOpcode() && "Unexpected node");
2619 X86::CondCode CC = X86::COND_INVALID;
2620 unsigned Opc = N->getMachineOpcode();
2621 if (Opc == X86::JCC_1)
2622 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(1));
2623 else if (Opc == X86::SETCCr)
2624 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(0));
2625 else if (Opc == X86::SETCCm)
2626 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(5));
2627 else if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr ||
2628 Opc == X86::CMOV64rr)
2629 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(2));
2630 else if (Opc == X86::CMOV16rm || Opc == X86::CMOV32rm ||
2631 Opc == X86::CMOV64rm)
2632 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(6));
2634 return CC;
2637 /// Return true if the given X86ISD::CMP node has no users that use a flag
2638 /// other than ZF.
2639 bool X86DAGToDAGISel::onlyUsesZeroFlag(SDValue Flags) const {
2640 // Examine each user of the node.
2641 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2642 UI != UE; ++UI) {
2643 // Only check things that use the flags.
2644 if (UI.getUse().getResNo() != Flags.getResNo())
2645 continue;
2646 // Only examine CopyToReg uses that copy to EFLAGS.
2647 if (UI->getOpcode() != ISD::CopyToReg ||
2648 cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2649 return false;
2650 // Examine each user of the CopyToReg use.
2651 for (SDNode::use_iterator FlagUI = UI->use_begin(),
2652 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2653 // Only examine the Flag result.
2654 if (FlagUI.getUse().getResNo() != 1) continue;
2655 // Anything unusual: assume conservatively.
2656 if (!FlagUI->isMachineOpcode()) return false;
2657 // Examine the condition code of the user.
2658 X86::CondCode CC = getCondFromNode(*FlagUI);
2660 switch (CC) {
2661 // Comparisons which only use the zero flag.
2662 case X86::COND_E: case X86::COND_NE:
2663 continue;
2664 // Anything else: assume conservatively.
2665 default:
2666 return false;
2670 return true;
2673 /// Return true if the given X86ISD::CMP node has no uses which require the SF
2674 /// flag to be accurate.
2675 bool X86DAGToDAGISel::hasNoSignFlagUses(SDValue Flags) const {
2676 // Examine each user of the node.
2677 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2678 UI != UE; ++UI) {
2679 // Only check things that use the flags.
2680 if (UI.getUse().getResNo() != Flags.getResNo())
2681 continue;
2682 // Only examine CopyToReg uses that copy to EFLAGS.
2683 if (UI->getOpcode() != ISD::CopyToReg ||
2684 cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2685 return false;
2686 // Examine each user of the CopyToReg use.
2687 for (SDNode::use_iterator FlagUI = UI->use_begin(),
2688 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2689 // Only examine the Flag result.
2690 if (FlagUI.getUse().getResNo() != 1) continue;
2691 // Anything unusual: assume conservatively.
2692 if (!FlagUI->isMachineOpcode()) return false;
2693 // Examine the condition code of the user.
2694 X86::CondCode CC = getCondFromNode(*FlagUI);
2696 switch (CC) {
2697 // Comparisons which don't examine the SF flag.
2698 case X86::COND_A: case X86::COND_AE:
2699 case X86::COND_B: case X86::COND_BE:
2700 case X86::COND_E: case X86::COND_NE:
2701 case X86::COND_O: case X86::COND_NO:
2702 case X86::COND_P: case X86::COND_NP:
2703 continue;
2704 // Anything else: assume conservatively.
2705 default:
2706 return false;
2710 return true;
2713 static bool mayUseCarryFlag(X86::CondCode CC) {
2714 switch (CC) {
2715 // Comparisons which don't examine the CF flag.
2716 case X86::COND_O: case X86::COND_NO:
2717 case X86::COND_E: case X86::COND_NE:
2718 case X86::COND_S: case X86::COND_NS:
2719 case X86::COND_P: case X86::COND_NP:
2720 case X86::COND_L: case X86::COND_GE:
2721 case X86::COND_G: case X86::COND_LE:
2722 return false;
2723 // Anything else: assume conservatively.
2724 default:
2725 return true;
2729 /// Return true if the given flag-setting node has no uses which require the
2730 /// CF flag to be accurate.
2731 bool X86DAGToDAGISel::hasNoCarryFlagUses(SDValue Flags) const {
2732 // Examine each user of the node.
2733 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2734 UI != UE; ++UI) {
2735 // Only check things that use the flags.
2736 if (UI.getUse().getResNo() != Flags.getResNo())
2737 continue;
2739 unsigned UIOpc = UI->getOpcode();
2741 if (UIOpc == ISD::CopyToReg) {
2742 // Only examine CopyToReg uses that copy to EFLAGS.
2743 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2744 return false;
2745 // Examine each user of the CopyToReg use.
2746 for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
2747 FlagUI != FlagUE; ++FlagUI) {
2748 // Only examine the Flag result.
2749 if (FlagUI.getUse().getResNo() != 1)
2750 continue;
2751 // Anything unusual: assume conservatively.
2752 if (!FlagUI->isMachineOpcode())
2753 return false;
2754 // Examine the condition code of the user.
2755 X86::CondCode CC = getCondFromNode(*FlagUI);
2757 if (mayUseCarryFlag(CC))
2758 return false;
2761 // This CopyToReg is ok. Move on to the next user.
2762 continue;
2765 // This might be an unselected node. So look for the pre-isel opcodes that
2766 // use flags.
2767 unsigned CCOpNo;
2768 switch (UIOpc) {
2769 default:
2770 // Something unusual. Be conservative.
2771 return false;
2772 case X86ISD::SETCC: CCOpNo = 0; break;
2773 case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
2774 case X86ISD::CMOV: CCOpNo = 2; break;
2775 case X86ISD::BRCOND: CCOpNo = 2; break;
2778 X86::CondCode CC = (X86::CondCode)UI->getConstantOperandVal(CCOpNo);
2779 if (mayUseCarryFlag(CC))
2780 return false;
2782 return true;
2785 /// Check whether or not the chain ending in StoreNode is suitable for the
2786 /// read-modify-write {load; op; store} transformation.
2787 static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
2788 SDValue StoredVal, SelectionDAG *CurDAG,
2789 unsigned LoadOpNo,
2790 LoadSDNode *&LoadNode,
2791 SDValue &InputChain) {
2792 // Is the stored value result 0 of the operation?
2793 if (StoredVal.getResNo() != 0) return false;
2795 // Are there other uses of the operation other than the store?
2796 if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
2798 // Is the store non-extending and non-indexed?
2799 if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
2800 return false;
2802 SDValue Load = StoredVal->getOperand(LoadOpNo);
2803 // Is the stored value a non-extending and non-indexed load?
2804 if (!ISD::isNormalLoad(Load.getNode())) return false;
2806 // Return LoadNode by reference.
2807 LoadNode = cast<LoadSDNode>(Load);
2809 // Is store the only read of the loaded value?
2810 if (!Load.hasOneUse())
2811 return false;
2813 // Is the address of the store the same as the load?
2814 if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
2815 LoadNode->getOffset() != StoreNode->getOffset())
2816 return false;
2818 bool FoundLoad = false;
2819 SmallVector<SDValue, 4> ChainOps;
2820 SmallVector<const SDNode *, 4> LoopWorklist;
2821 SmallPtrSet<const SDNode *, 16> Visited;
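// Max caps how many nodes hasPredecessorHelper below may visit, keeping the
// cycle check from blowing up compile time on very large chains.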
2822 const unsigned int Max = 1024;
2824 // Visualization of Load-Op-Store fusion:
2825 // -------------------------
2826 // Legend:
2827 // *-lines = Chain operand dependencies.
2828 // |-lines = Normal operand dependencies.
2829 // Dependencies flow down and right. n-suffix references multiple nodes.
2831 //        C                        Xn  C
2832 //        *                         *  *
2833 //        *                         *  *
2834 //  Xn  A-LD    Yn                  TF          Yn
2835 //   *    *  \   |                   *          |
2836 //    *   *   \  |                   *          |
2837 //     *  *    \ |        =>        A--LD_OP_ST
2838 //      * *     \|                       \
2839 //       TF     OP                        \
2840 //        *     |                          \       Zn
2841 //        *     |                           \
2842 //       A-ST                                Zn
2845 // This merge induced dependences from: #1: Xn -> LD, OP, Zn
2846 // #2: Yn -> LD
2847 // #3: ST -> Zn
2849 // Ensure the transform is safe by checking for the dual
2850 // dependencies to make sure we do not induce a loop.
2852 // As LD is a predecessor to both OP and ST we can do this by checking:
2853 // a). if LD is a predecessor to a member of Xn or Yn.
2854 // b). if a Zn is a predecessor to ST.
2856 // However, (b) can only occur through being a chain predecessor to
2857 // ST, which is the same as Zn being a member or predecessor of Xn,
2858 // which is a subset of LD being a predecessor of Xn. So it's
2859 // subsumed by check (a).
2861 SDValue Chain = StoreNode->getChain();
2863 // Gather X elements in ChainOps.
2864 if (Chain == Load.getValue(1)) {
2865 FoundLoad = true;
2866 ChainOps.push_back(Load.getOperand(0));
2867 } else if (Chain.getOpcode() == ISD::TokenFactor) {
2868 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
2869 SDValue Op = Chain.getOperand(i);
2870 if (Op == Load.getValue(1)) {
2871 FoundLoad = true;
2872 // Drop Load, but keep its chain. No cycle check necessary.
2873 ChainOps.push_back(Load.getOperand(0));
2874 continue;
2876 LoopWorklist.push_back(Op.getNode());
2877 ChainOps.push_back(Op);
2881 if (!FoundLoad)
2882 return false;
2884 // Worklist is currently Xn. Add Yn to worklist.
2885 for (SDValue Op : StoredVal->ops())
2886 if (Op.getNode() != LoadNode)
2887 LoopWorklist.push_back(Op.getNode());
2889 // Check (a) if Load is a predecessor to Xn + Yn
2890 if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
2891 true))
2892 return false;
2894 InputChain =
2895 CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps);
2896 return true;
2899 // Change a chain of {load; op; store} of the same value into a simple op
2900 // through memory of that value, if the uses of the modified value and its
2901 // address are suitable.
2903 // The tablegen memory operand pattern is currently not able to match the
2904 // case where the EFLAGS produced by the original operation are used.
2906 // To move this to tablegen, we'll need to improve tablegen to allow flags to
2907 // be transferred from a node in the pattern to the result node, probably with
2908 // a new keyword. For example, we have this
2909 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2910 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2911 // (implicit EFLAGS)]>;
2912 // but maybe need something like this
2913 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2914 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2915 // (transferrable EFLAGS)]>;
2917 // Until then, we manually fold these and instruction select the operation
2918 // here.
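// For example, (store (add (load [M]), 5), [M]) is selected below into a
// single add-to-memory instruction such as ADD32mi8.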
2919 bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
2920 StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
2921 SDValue StoredVal = StoreNode->getOperand(1);
2922 unsigned Opc = StoredVal->getOpcode();
2924 // Before we try to select anything, make sure this is a memory operand size
2925 // and an opcode we can handle. Note that this must match the code below that
2926 // actually lowers the opcodes.
2927 EVT MemVT = StoreNode->getMemoryVT();
2928 if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
2929 MemVT != MVT::i8)
2930 return false;
2932 bool IsCommutable = false;
2933 bool IsNegate = false;
2934 switch (Opc) {
2935 default:
2936 return false;
2937 case X86ISD::SUB:
2938 IsNegate = isNullConstant(StoredVal.getOperand(0));
2939 break;
2940 case X86ISD::SBB:
2941 break;
2942 case X86ISD::ADD:
2943 case X86ISD::ADC:
2944 case X86ISD::AND:
2945 case X86ISD::OR:
2946 case X86ISD::XOR:
2947 IsCommutable = true;
2948 break;
2951 unsigned LoadOpNo = IsNegate ? 1 : 0;
2952 LoadSDNode *LoadNode = nullptr;
2953 SDValue InputChain;
2954 if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
2955 LoadNode, InputChain)) {
2956 if (!IsCommutable)
2957 return false;
2959 // This operation is commutable, try the other operand.
2960 LoadOpNo = 1;
2961 if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
2962 LoadNode, InputChain))
2963 return false;
2966 SDValue Base, Scale, Index, Disp, Segment;
2967 if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
2968 Segment))
2969 return false;
2971 auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
2972 unsigned Opc8) {
2973 switch (MemVT.getSimpleVT().SimpleTy) {
2974 case MVT::i64:
2975 return Opc64;
2976 case MVT::i32:
2977 return Opc32;
2978 case MVT::i16:
2979 return Opc16;
2980 case MVT::i8:
2981 return Opc8;
2982 default:
2983 llvm_unreachable("Invalid size!");
2987 MachineSDNode *Result;
2988 switch (Opc) {
2989 case X86ISD::SUB:
2990 // Handle negate.
2991 if (IsNegate) {
2992 unsigned NewOpc = SelectOpcode(X86::NEG64m, X86::NEG32m, X86::NEG16m,
2993 X86::NEG8m);
2994 const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
2995 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
2996 MVT::Other, Ops);
2997 break;
2999 LLVM_FALLTHROUGH;
3000 case X86ISD::ADD:
3001 // Try to match inc/dec.
3002 if (!Subtarget->slowIncDec() || OptForSize) {
3003 bool IsOne = isOneConstant(StoredVal.getOperand(1));
3004 bool IsNegOne = isAllOnesConstant(StoredVal.getOperand(1));
3005 // ADD/SUB with 1/-1 can use inc/dec when the carry flag isn't used.
3006 if ((IsOne || IsNegOne) && hasNoCarryFlagUses(StoredVal.getValue(1))) {
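// ADD +1 and SUB -1 map to INC; ADD -1 and SUB +1 map to DEC.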
3007 unsigned NewOpc =
3008 ((Opc == X86ISD::ADD) == IsOne)
3009 ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
3010 : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
3011 const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3012 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3013 MVT::Other, Ops);
3014 break;
3017 LLVM_FALLTHROUGH;
3018 case X86ISD::ADC:
3019 case X86ISD::SBB:
3020 case X86ISD::AND:
3021 case X86ISD::OR:
3022 case X86ISD::XOR: {
3023 auto SelectRegOpcode = [SelectOpcode](unsigned Opc) {
3024 switch (Opc) {
3025 case X86ISD::ADD:
3026 return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr,
3027 X86::ADD8mr);
3028 case X86ISD::ADC:
3029 return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr,
3030 X86::ADC8mr);
3031 case X86ISD::SUB:
3032 return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr,
3033 X86::SUB8mr);
3034 case X86ISD::SBB:
3035 return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr,
3036 X86::SBB8mr);
3037 case X86ISD::AND:
3038 return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr,
3039 X86::AND8mr);
3040 case X86ISD::OR:
3041 return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr);
3042 case X86ISD::XOR:
3043 return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr,
3044 X86::XOR8mr);
3045 default:
3046 llvm_unreachable("Invalid opcode!");
3049 auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) {
3050 switch (Opc) {
3051 case X86ISD::ADD:
3052 return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0);
3053 case X86ISD::ADC:
3054 return SelectOpcode(X86::ADC64mi8, X86::ADC32mi8, X86::ADC16mi8, 0);
3055 case X86ISD::SUB:
3056 return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0);
3057 case X86ISD::SBB:
3058 return SelectOpcode(X86::SBB64mi8, X86::SBB32mi8, X86::SBB16mi8, 0);
3059 case X86ISD::AND:
3060 return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0);
3061 case X86ISD::OR:
3062 return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0);
3063 case X86ISD::XOR:
3064 return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0);
3065 default:
3066 llvm_unreachable("Invalid opcode!");
3069 auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
3070 switch (Opc) {
3071 case X86ISD::ADD:
3072 return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi,
3073 X86::ADD8mi);
3074 case X86ISD::ADC:
3075 return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi,
3076 X86::ADC8mi);
3077 case X86ISD::SUB:
3078 return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi,
3079 X86::SUB8mi);
3080 case X86ISD::SBB:
3081 return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi,
3082 X86::SBB8mi);
3083 case X86ISD::AND:
3084 return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi,
3085 X86::AND8mi);
3086 case X86ISD::OR:
3087 return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi,
3088 X86::OR8mi);
3089 case X86ISD::XOR:
3090 return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi,
3091 X86::XOR8mi);
3092 default:
3093 llvm_unreachable("Invalid opcode!");
3097 unsigned NewOpc = SelectRegOpcode(Opc);
3098 SDValue Operand = StoredVal->getOperand(1-LoadOpNo);
3100 // See if the operand is a constant that we can fold into an immediate
3101 // operand.
3102 if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
3103 int64_t OperandV = OperandC->getSExtValue();
3105 // Check if we can shrink the operand enough to fit in an immediate (or
3106 // fit into a smaller immediate) by negating it and switching the
3107 // operation.
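// For example, "add $128" does not fit in an 8-bit immediate but
// "sub $-128" does, so flipping ADD<->SUB and negating the constant can
// shrink the encoding.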
3108 if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
3109 ((MemVT != MVT::i8 && !isInt<8>(OperandV) && isInt<8>(-OperandV)) ||
3110 (MemVT == MVT::i64 && !isInt<32>(OperandV) &&
3111 isInt<32>(-OperandV))) &&
3112 hasNoCarryFlagUses(StoredVal.getValue(1))) {
3113 OperandV = -OperandV;
3114 Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
3117 // First try to fit this into an Imm8 operand. If it doesn't fit, then try
3118 // the larger immediate operand.
3119 if (MemVT != MVT::i8 && isInt<8>(OperandV)) {
3120 Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3121 NewOpc = SelectImm8Opcode(Opc);
3122 } else if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
3123 Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3124 NewOpc = SelectImmOpcode(Opc);
3128 if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) {
3129 SDValue CopyTo =
3130 CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS,
3131 StoredVal.getOperand(2), SDValue());
3133 const SDValue Ops[] = {Base, Scale, Index, Disp,
3134 Segment, Operand, CopyTo, CopyTo.getValue(1)};
3135 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3136 Ops);
3137 } else {
3138 const SDValue Ops[] = {Base, Scale, Index, Disp,
3139 Segment, Operand, InputChain};
3140 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3141 Ops);
3143 break;
3145 default:
3146 llvm_unreachable("Invalid opcode!");
3149 MachineMemOperand *MemOps[] = {StoreNode->getMemOperand(),
3150 LoadNode->getMemOperand()};
3151 CurDAG->setNodeMemRefs(Result, MemOps);
3153 // Update Load Chain uses as well.
3154 ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1));
3155 ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
3156 ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
3157 CurDAG->RemoveDeadNode(Node);
3158 return true;
3161 // See if this is an X & Mask that we can match to BEXTR/BZHI.
3162 // Where Mask is one of the following patterns:
3163 // a) x & (1 << nbits) - 1
3164 // b) x & ~(-1 << nbits)
3165 // c) x & (-1 >> (32 - y))
3166 // d) x << (32 - y) >> (32 - y)
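// All four forms keep only the low 'nbits'/'y' bits of x (with logical
// shifts), which is what BZHI computes directly and what BEXTR computes
// with a start index of 0.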
3167 bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
3168 assert(
3169 (Node->getOpcode() == ISD::AND || Node->getOpcode() == ISD::SRL) &&
3170 "Should be either an and-mask, or right-shift after clearing high bits.");
3172 // BEXTR is a BMI instruction, BZHI is a BMI2 instruction. We need at least one.
3173 if (!Subtarget->hasBMI() && !Subtarget->hasBMI2())
3174 return false;
3176 MVT NVT = Node->getSimpleValueType(0);
3178 // Only supported for 32 and 64 bits.
3179 if (NVT != MVT::i32 && NVT != MVT::i64)
3180 return false;
3182 SDValue NBits;
3184 // If we have BMI2's BZHI, we are ok with multi-use patterns.
3185 // Else, if we only have BMI1's BEXTR, we require one-use.
3186 const bool CanHaveExtraUses = Subtarget->hasBMI2();
3187 auto checkUses = [CanHaveExtraUses](SDValue Op, unsigned NUses) {
3188 return CanHaveExtraUses ||
3189 Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
3191 auto checkOneUse = [checkUses](SDValue Op) { return checkUses(Op, 1); };
3192 auto checkTwoUse = [checkUses](SDValue Op) { return checkUses(Op, 2); };
3194 auto peekThroughOneUseTruncation = [checkOneUse](SDValue V) {
3195 if (V->getOpcode() == ISD::TRUNCATE && checkOneUse(V)) {
3196 assert(V.getSimpleValueType() == MVT::i32 &&
3197 V.getOperand(0).getSimpleValueType() == MVT::i64 &&
3198 "Expected i64 -> i32 truncation");
3199 V = V.getOperand(0);
3201 return V;
3204 // a) x & ((1 << nbits) + (-1))
3205 auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation,
3206 &NBits](SDValue Mask) -> bool {
3207 // Match `add`. Must only have one use!
3208 if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask))
3209 return false;
3210 // We should be adding an all-ones constant (i.e. subtracting one).
3211 if (!isAllOnesConstant(Mask->getOperand(1)))
3212 return false;
3213 // Match `1 << nbits`. Might be truncated. Must only have one use!
3214 SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3215 if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3216 return false;
3217 if (!isOneConstant(M0->getOperand(0)))
3218 return false;
3219 NBits = M0->getOperand(1);
3220 return true;
3223 auto isAllOnes = [this, peekThroughOneUseTruncation, NVT](SDValue V) {
3224 V = peekThroughOneUseTruncation(V);
3225 return CurDAG->MaskedValueIsAllOnes(
3226 V, APInt::getLowBitsSet(V.getSimpleValueType().getSizeInBits(),
3227 NVT.getSizeInBits()));
3230 // b) x & ~(-1 << nbits)
3231 auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation,
3232 &NBits](SDValue Mask) -> bool {
3233 // Match `~()`. Must only have one use!
3234 if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask))
3235 return false;
3236 // The -1 only has to be all-ones for the final Node's NVT.
3237 if (!isAllOnes(Mask->getOperand(1)))
3238 return false;
3239 // Match `-1 << nbits`. Might be truncated. Must only have one use!
3240 SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3241 if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3242 return false;
3243 // The -1 only has to be all-ones for the final Node's NVT.
3244 if (!isAllOnes(M0->getOperand(0)))
3245 return false;
3246 NBits = M0->getOperand(1);
3247 return true;
3250 // Match potentially-truncated (bitwidth - y)
3251 auto matchShiftAmt = [checkOneUse, &NBits](SDValue ShiftAmt,
3252 unsigned Bitwidth) {
3253 // Skip over a truncate of the shift amount.
3254 if (ShiftAmt.getOpcode() == ISD::TRUNCATE) {
3255 ShiftAmt = ShiftAmt.getOperand(0);
3256 // The trunc should have been the only user of the real shift amount.
3257 if (!checkOneUse(ShiftAmt))
3258 return false;
3260 // Match the shift amount as: (bitwidth - y). It should go away, too.
3261 if (ShiftAmt.getOpcode() != ISD::SUB)
3262 return false;
3263 auto V0 = dyn_cast<ConstantSDNode>(ShiftAmt.getOperand(0));
3264 if (!V0 || V0->getZExtValue() != Bitwidth)
3265 return false;
3266 NBits = ShiftAmt.getOperand(1);
3267 return true;
3270 // c) x & (-1 >> (32 - y))
3271 auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation,
3272 matchShiftAmt](SDValue Mask) -> bool {
3273 // The mask itself may be truncated.
3274 Mask = peekThroughOneUseTruncation(Mask);
3275 unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
3276 // Match `l>>`. Must only have one use!
3277 if (Mask.getOpcode() != ISD::SRL || !checkOneUse(Mask))
3278 return false;
3279 // We should be shifting a truly all-ones constant.
3280 if (!isAllOnesConstant(Mask.getOperand(0)))
3281 return false;
3282 SDValue M1 = Mask.getOperand(1);
3283 // The shift amount should not be used externally.
3284 if (!checkOneUse(M1))
3285 return false;
3286 return matchShiftAmt(M1, Bitwidth);
3289 SDValue X;
3291 // d) x << (32 - y) >> (32 - y)
3292 auto matchPatternD = [checkOneUse, checkTwoUse, matchShiftAmt,
3293 &X](SDNode *Node) -> bool {
3294 if (Node->getOpcode() != ISD::SRL)
3295 return false;
3296 SDValue N0 = Node->getOperand(0);
3297 if (N0->getOpcode() != ISD::SHL || !checkOneUse(N0))
3298 return false;
3299 unsigned Bitwidth = N0.getSimpleValueType().getSizeInBits();
3300 SDValue N1 = Node->getOperand(1);
3301 SDValue N01 = N0->getOperand(1);
3302 // Both of the shifts must be by the exact same value.
3303 // There should not be any uses of the shift amount outside of the pattern.
3304 if (N1 != N01 || !checkTwoUse(N1))
3305 return false;
3306 if (!matchShiftAmt(N1, Bitwidth))
3307 return false;
3308 X = N0->getOperand(0);
3309 return true;
3312 auto matchLowBitMask = [matchPatternA, matchPatternB,
3313 matchPatternC](SDValue Mask) -> bool {
3314 return matchPatternA(Mask) || matchPatternB(Mask) || matchPatternC(Mask);
3317 if (Node->getOpcode() == ISD::AND) {
3318 X = Node->getOperand(0);
3319 SDValue Mask = Node->getOperand(1);
3321 if (matchLowBitMask(Mask)) {
3322 // Great.
3323 } else {
3324 std::swap(X, Mask);
3325 if (!matchLowBitMask(Mask))
3326 return false;
3328 } else if (!matchPatternD(Node))
3329 return false;
3331 SDLoc DL(Node);
3333 // Truncate the shift amount.
3334 NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
3335 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3338 // Insert the 8-bit NBits into the lowest 8 bits of a 32-bit register.
3339 // All the other bits are undefined; we do not care about them.
3339 SDValue ImplDef = SDValue(
3340 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32), 0);
3341 insertDAGNode(*CurDAG, SDValue(Node, 0), ImplDef);
3343 SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
3344 insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
3345 NBits = SDValue(
3346 CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::i32, ImplDef,
3347 NBits, SRIdxVal), 0);
3348 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3350 if (Subtarget->hasBMI2()) {
3351 // Great, just emit the BZHI.
3352 if (NVT != MVT::i32) {
3353 // But have to place the bit count into the wide-enough register first.
3354 NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits);
3355 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3358 SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, NVT, X, NBits);
3359 ReplaceNode(Node, Extract.getNode());
3360 SelectCode(Extract.getNode());
3361 return true;
3364 // Else, if we do *NOT* have BMI2, let's find out if the 'X' is
3365 // *logically* shifted (potentially with a one-use trunc in between),
3366 // and the truncation was the only use of the shift,
3367 // and if so look past one-use truncation.
3369 SDValue RealX = peekThroughOneUseTruncation(X);
3370 // FIXME: only if the shift is one-use?
3371 if (RealX != X && RealX.getOpcode() == ISD::SRL)
3372 X = RealX;
3375 MVT XVT = X.getSimpleValueType();
3377 // Else, emitting BEXTR requires one more step.
3378 // The 'control' of BEXTR has the pattern of:
3379 // [15...8 bit][ 7...0 bit] location
3380 // [ bit count][ shift] name
3381 // I.e. 0b000000011'00000001 means (x >> 0b1) & 0b11
3383 // Shift NBits left by 8 bits, thus producing 'control'.
3384 // This makes the low 8 bits zero.
3385 SDValue C8 = CurDAG->getConstant(8, DL, MVT::i8);
3386 SDValue Control = CurDAG->getNode(ISD::SHL, DL, MVT::i32, NBits, C8);
3387 insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3389 // If the 'X' is *logically* shifted, we can fold that shift into 'control'.
3390 // FIXME: only if the shift is one-use?
3391 if (X.getOpcode() == ISD::SRL) {
3392 SDValue ShiftAmt = X.getOperand(1);
3393 X = X.getOperand(0);
3395 assert(ShiftAmt.getValueType() == MVT::i8 &&
3396 "Expected shift amount to be i8");
3398 // Now, *zero*-extend the shift amount. The bits 8...15 *must* be zero!
3399 // We could zext to i16 in some form, but we intentionally don't do that.
3400 SDValue OrigShiftAmt = ShiftAmt;
3401 ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShiftAmt);
3402 insertDAGNode(*CurDAG, OrigShiftAmt, ShiftAmt);
3404 // And now 'or' these low 8 bits of shift amount into the 'control'.
3405 Control = CurDAG->getNode(ISD::OR, DL, MVT::i32, Control, ShiftAmt);
3406 insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3409 // But have to place the 'control' into the wide-enough register first.
3410 if (XVT != MVT::i32) {
3411 Control = CurDAG->getNode(ISD::ANY_EXTEND, DL, XVT, Control);
3412 insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3415 // And finally, form the BEXTR itself.
3416 SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, XVT, X, Control);
3418 // The 'X' was originally truncated. Do that now.
3419 if (XVT != NVT) {
3420 insertDAGNode(*CurDAG, SDValue(Node, 0), Extract);
3421 Extract = CurDAG->getNode(ISD::TRUNCATE, DL, NVT, Extract);
3424 ReplaceNode(Node, Extract.getNode());
3425 SelectCode(Extract.getNode());
3427 return true;
3430 // See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
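// (e.g. (x >> 4) & 0xFFF uses the control value 0x0C04: length 12 in bits
// 15:8 and start 4 in bits 7:0, taken as an immediate by TBM's BEXTRI or
// loaded into a register for BMI's BEXTR.)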
3431 MachineSDNode *X86DAGToDAGISel::matchBEXTRFromAndImm(SDNode *Node) {
3432 MVT NVT = Node->getSimpleValueType(0);
3433 SDLoc dl(Node);
3435 SDValue N0 = Node->getOperand(0);
3436 SDValue N1 = Node->getOperand(1);
3438 // If we have TBM we can use an immediate for the control. If we have BMI
3439 // we should only do this if the BEXTR instruction is implemented well.
3440 // Otherwise moving the control into a register makes this more costly.
3441 // TODO: Maybe load folding, greater than 32-bit masks, or a guarantee of LICM
3442 // hoisting the move immediate would make it worthwhile with a less optimal
3443 // BEXTR?
3444 if (!Subtarget->hasTBM() &&
3445 !(Subtarget->hasBMI() && Subtarget->hasFastBEXTR()))
3446 return nullptr;
3448 // Must have a shift right.
3449 if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
3450 return nullptr;
3452 // Shift can't have additional users.
3453 if (!N0->hasOneUse())
3454 return nullptr;
3456 // Only supported for 32 and 64 bits.
3457 if (NVT != MVT::i32 && NVT != MVT::i64)
3458 return nullptr;
3460 // Shift amount and RHS of and must be constant.
3461 ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(N1);
3462 ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
3463 if (!MaskCst || !ShiftCst)
3464 return nullptr;
3466 // And RHS must be a mask.
3467 uint64_t Mask = MaskCst->getZExtValue();
3468 if (!isMask_64(Mask))
3469 return nullptr;
3471 uint64_t Shift = ShiftCst->getZExtValue();
3472 uint64_t MaskSize = countPopulation(Mask);
3474 // Don't interfere with something that can be handled by extracting AH.
3475 // TODO: If we are able to fold a load, BEXTR might still be better than AH.
3476 if (Shift == 8 && MaskSize == 8)
3477 return nullptr;
3479 // Make sure we are only using bits that were in the original value, not
3480 // shifted in.
3481 if (Shift + MaskSize > NVT.getSizeInBits())
3482 return nullptr;
3484 SDValue New = CurDAG->getTargetConstant(Shift | (MaskSize << 8), dl, NVT);
3485 unsigned ROpc = NVT == MVT::i64 ? X86::BEXTRI64ri : X86::BEXTRI32ri;
3486 unsigned MOpc = NVT == MVT::i64 ? X86::BEXTRI64mi : X86::BEXTRI32mi;
3488 // BMI requires the immediate to be placed in a register.
3489 if (!Subtarget->hasTBM()) {
3490 ROpc = NVT == MVT::i64 ? X86::BEXTR64rr : X86::BEXTR32rr;
3491 MOpc = NVT == MVT::i64 ? X86::BEXTR64rm : X86::BEXTR32rm;
3492 unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3493 New = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, New), 0);
3496 MachineSDNode *NewNode;
3497 SDValue Input = N0->getOperand(0);
3498 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3499 if (tryFoldLoad(Node, N0.getNode(), Input, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3500 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, New, Input.getOperand(0) };
3501 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
3502 NewNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3503 // Update the chain.
3504 ReplaceUses(Input.getValue(1), SDValue(NewNode, 2));
3505 // Record the mem-refs
3506 CurDAG->setNodeMemRefs(NewNode, {cast<LoadSDNode>(Input)->getMemOperand()});
3507 } else {
3508 NewNode = CurDAG->getMachineNode(ROpc, dl, NVT, MVT::i32, Input, New);
3511 return NewNode;
3514 // Emit a PCMPISTR(I/M) instruction.
3515 MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
3516 bool MayFoldLoad, const SDLoc &dl,
3517 MVT VT, SDNode *Node) {
3518 SDValue N0 = Node->getOperand(0);
3519 SDValue N1 = Node->getOperand(1);
3520 SDValue Imm = Node->getOperand(2);
3521 const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3522 Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3524 // Try to fold a load. No need to check alignment.
3525 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3526 if (MayFoldLoad && tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3527 SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3528 N1.getOperand(0) };
3529 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
3530 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3531 // Update the chain.
3532 ReplaceUses(N1.getValue(1), SDValue(CNode, 2));
3533 // Record the mem-refs
3534 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
3535 return CNode;
3538 SDValue Ops[] = { N0, N1, Imm };
3539 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
3540 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3541 return CNode;
3544 // Emit a PCMPESTR(I/M) instruction. Also return the Glue result in case we need
3545 // to emit a second instruction after this one. This is needed since we have two
3546 // copyToReg nodes glued before this and we need to continue that glue through.
3547 MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
3548 bool MayFoldLoad, const SDLoc &dl,
3549 MVT VT, SDNode *Node,
3550 SDValue &InFlag) {
3551 SDValue N0 = Node->getOperand(0);
3552 SDValue N2 = Node->getOperand(2);
3553 SDValue Imm = Node->getOperand(4);
3554 const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3555 Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3557 // Try to fold a load. No need to check alignment.
3558 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3559 if (MayFoldLoad && tryFoldLoad(Node, N2, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3560 SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3561 N2.getOperand(0), InFlag };
3562 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
3563 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3564 InFlag = SDValue(CNode, 3);
3565 // Update the chain.
3566 ReplaceUses(N2.getValue(1), SDValue(CNode, 2));
3567 // Record the mem-refs
3568 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N2)->getMemOperand()});
3569 return CNode;
3572 SDValue Ops[] = { N0, N2, Imm, InFlag };
3573 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue);
3574 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3575 InFlag = SDValue(CNode, 2);
3576 return CNode;
3579 bool X86DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
3580 EVT VT = N->getValueType(0);
3582 // Only handle scalar shifts.
3583 if (VT.isVector())
3584 return false;
3586 // Narrower shifts only mask to 5 bits in hardware.
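// (i.e. 8/16/32-bit shifts use (amount & 31); only 64-bit shifts mask the
// amount to 6 bits, (amount & 63).)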
3587 unsigned Size = VT == MVT::i64 ? 64 : 32;
3589 SDValue OrigShiftAmt = N->getOperand(1);
3590 SDValue ShiftAmt = OrigShiftAmt;
3591 SDLoc DL(N);
3593 // Skip over a truncate of the shift amount.
3594 if (ShiftAmt->getOpcode() == ISD::TRUNCATE)
3595 ShiftAmt = ShiftAmt->getOperand(0);
3597 // This function is called after X86DAGToDAGISel::matchBitExtract(),
3598 // so we are not afraid that we might mess up the BZHI/BEXTR pattern.
3600 SDValue NewShiftAmt;
3601 if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
3602 SDValue Add0 = ShiftAmt->getOperand(0);
3603 SDValue Add1 = ShiftAmt->getOperand(1);
3604 // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
3605 // to avoid the ADD/SUB.
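// e.g. a 32-bit shift by (x + 32) is the same as a shift by x, because the
// hardware already masks the amount to 5 bits.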
3606 if (isa<ConstantSDNode>(Add1) &&
3607 cast<ConstantSDNode>(Add1)->getZExtValue() % Size == 0) {
3608 NewShiftAmt = Add0;
3609 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
3610 // generate a NEG instead of a SUB of a constant.
3611 } else if (ShiftAmt->getOpcode() == ISD::SUB &&
3612 isa<ConstantSDNode>(Add0) &&
3613 cast<ConstantSDNode>(Add0)->getZExtValue() != 0 &&
3614 cast<ConstantSDNode>(Add0)->getZExtValue() % Size == 0) {
3615 // Insert a negate op.
3616 // TODO: This isn't guaranteed to replace the sub if there is a logic cone
3617 // that uses it that's not a shift.
3618 EVT SubVT = ShiftAmt.getValueType();
3619 SDValue Zero = CurDAG->getConstant(0, DL, SubVT);
3620 SDValue Neg = CurDAG->getNode(ISD::SUB, DL, SubVT, Zero, Add1);
3621 NewShiftAmt = Neg;
3623 // Insert these operands into a valid topological order so they can
3624 // get selected independently.
3625 insertDAGNode(*CurDAG, OrigShiftAmt, Zero);
3626 insertDAGNode(*CurDAG, OrigShiftAmt, Neg);
3627 } else
3628 return false;
3629 } else
3630 return false;
3632 if (NewShiftAmt.getValueType() != MVT::i8) {
3633 // Need to truncate the shift amount.
3634 NewShiftAmt = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NewShiftAmt);
3635 // Add to a correct topological ordering.
3636 insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
3639 // Insert a new mask to keep the shift amount legal. This should be removed
3640 // by isel patterns.
3641 NewShiftAmt = CurDAG->getNode(ISD::AND, DL, MVT::i8, NewShiftAmt,
3642 CurDAG->getConstant(Size - 1, DL, MVT::i8));
3643 // Place in a correct topological ordering.
3644 insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
3646 SDNode *UpdatedNode = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
3647 NewShiftAmt);
3648 if (UpdatedNode != N) {
3649 // If we found an existing node, we should replace ourselves with that node
3650 // and wait for it to be selected after its other users.
3651 ReplaceNode(N, UpdatedNode);
3652 return true;
3655 // If the original shift amount is now dead, delete it so that we don't run
3656 // it through isel.
3657 if (OrigShiftAmt.getNode()->use_empty())
3658 CurDAG->RemoveDeadNode(OrigShiftAmt.getNode());
3660 // Now that we've optimized the shift amount, defer to normal isel to get
3661 // load folding and legacy vs BMI2 selection without repeating it here.
3662 SelectCode(N);
3663 return true;
3666 bool X86DAGToDAGISel::tryShrinkShlLogicImm(SDNode *N) {
3667 MVT NVT = N->getSimpleValueType(0);
3668 unsigned Opcode = N->getOpcode();
3669 SDLoc dl(N);
3671 // For operations of the form (x << C1) op C2, check if we can use a smaller
3672 // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
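// e.g. (x << 8) & 0x700 can become (x & 7) << 8, letting the AND use a
// sign-extended 8-bit immediate instead of a 32-bit one.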
3673 SDValue Shift = N->getOperand(0);
3674 SDValue N1 = N->getOperand(1);
3676 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
3677 if (!Cst)
3678 return false;
3680 int64_t Val = Cst->getSExtValue();
3682 // If we have an any_extend feeding the AND, look through it to see if there
3683 // is a shift behind it. But only if the AND doesn't use the extended bits.
3684 // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
3685 bool FoundAnyExtend = false;
3686 if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
3687 Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
3688 isUInt<32>(Val)) {
3689 FoundAnyExtend = true;
3690 Shift = Shift.getOperand(0);
3693 if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
3694 return false;
3696 // i8 is unshrinkable, i16 should be promoted to i32.
3697 if (NVT != MVT::i32 && NVT != MVT::i64)
3698 return false;
3700 ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
3701 if (!ShlCst)
3702 return false;
3704 uint64_t ShAmt = ShlCst->getZExtValue();
3706 // Make sure that we don't change the operation by removing bits.
3707 // This only matters for OR and XOR, AND is unaffected.
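// e.g. (x << 4) | 0xFF cannot be rewritten as ((x | 0xF) << 4): the low four
// bits of the result would be zero instead of all-ones.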
3708 uint64_t RemovedBitsMask = (1ULL << ShAmt) - 1;
3709 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
3710 return false;
3712 // Check the minimum bitwidth for the new constant.
3713 // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
3714 auto CanShrinkImmediate = [&](int64_t &ShiftedVal) {
3715 if (Opcode == ISD::AND) {
3716 // AND32ri is the same as AND64ri32 with zext imm.
3717 // Try this before sign extended immediates below.
3718 ShiftedVal = (uint64_t)Val >> ShAmt;
3719 if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
3720 return true;
3721 // Also swap order when the AND can become MOVZX.
3722 if (ShiftedVal == UINT8_MAX || ShiftedVal == UINT16_MAX)
3723 return true;
3725 ShiftedVal = Val >> ShAmt;
3726 if ((!isInt<8>(Val) && isInt<8>(ShiftedVal)) ||
3727 (!isInt<32>(Val) && isInt<32>(ShiftedVal)))
3728 return true;
3729 if (Opcode != ISD::AND) {
3730 // MOV32ri+OR64r/XOR64r is cheaper than MOV64ri64+OR64rr/XOR64rr
3731 ShiftedVal = (uint64_t)Val >> ShAmt;
3732 if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
3733 return true;
3735 return false;
3738 int64_t ShiftedVal;
3739 if (!CanShrinkImmediate(ShiftedVal))
3740 return false;
3742 // Ok, we can reorder to get a smaller immediate.
3744 // But, it's possible the original immediate allowed an AND to become MOVZX.
3745 // Do this check late so that the MaskedValueIsZero call happens as late as
3746 // possible.
3747 if (Opcode == ISD::AND) {
3748 // Find the smallest zext this could possibly be.
3749 unsigned ZExtWidth = Cst->getAPIntValue().getActiveBits();
3750 ZExtWidth = PowerOf2Ceil(std::max(ZExtWidth, 8U));
3752 // Figure out which bits need to be zero to achieve that mask.
3753 APInt NeededMask = APInt::getLowBitsSet(NVT.getSizeInBits(),
3754 ZExtWidth);
3755 NeededMask &= ~Cst->getAPIntValue();
3757 if (CurDAG->MaskedValueIsZero(N->getOperand(0), NeededMask))
3758 return false;
3761 SDValue X = Shift.getOperand(0);
3762 if (FoundAnyExtend) {
3763 SDValue NewX = CurDAG->getNode(ISD::ANY_EXTEND, dl, NVT, X);
3764 insertDAGNode(*CurDAG, SDValue(N, 0), NewX);
3765 X = NewX;
3768 SDValue NewCst = CurDAG->getConstant(ShiftedVal, dl, NVT);
3769 insertDAGNode(*CurDAG, SDValue(N, 0), NewCst);
3770 SDValue NewBinOp = CurDAG->getNode(Opcode, dl, NVT, X, NewCst);
3771 insertDAGNode(*CurDAG, SDValue(N, 0), NewBinOp);
3772 SDValue NewSHL = CurDAG->getNode(ISD::SHL, dl, NVT, NewBinOp,
3773 Shift.getOperand(1));
3774 ReplaceNode(N, NewSHL.getNode());
3775 SelectCode(NewSHL.getNode());
3776 return true;
3779 /// Convert vector increment or decrement to sub/add with an all-ones constant:
3780 /// add X, <1, 1...> --> sub X, <-1, -1...>
3781 /// sub X, <1, 1...> --> add X, <-1, -1...>
3782 /// The all-ones vector constant can be materialized using a pcmpeq instruction
3783 /// that is commonly recognized as an idiom (has no register dependency), so
3784 /// that's better/smaller than loading a splat 1 constant.
3785 bool X86DAGToDAGISel::combineIncDecVector(SDNode *Node) {
3786 assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB) &&
3787 "Unexpected opcode for increment/decrement transform");
3789 EVT VT = Node->getValueType(0);
3790 assert(VT.isVector() && "Should only be called for vectors.");
3792 SDValue X = Node->getOperand(0);
3793 SDValue OneVec = Node->getOperand(1);
3795 APInt SplatVal;
3796 if (!X86::isConstantSplat(OneVec, SplatVal) || !SplatVal.isOneValue())
3797 return false;
3799 SDLoc DL(Node);
3800 SDValue AllOnesVec;
3802 APInt Ones = APInt::getAllOnesValue(32);
3803 assert(VT.getSizeInBits() % 32 == 0 &&
3804 "Expected bit count to be a multiple of 32");
3805 unsigned NumElts = VT.getSizeInBits() / 32;
3806 assert(NumElts > 0 && "Expected to get non-empty vector.");
3807 AllOnesVec =
3808 CurDAG->getConstant(Ones, DL, MVT::getVectorVT(MVT::i32, NumElts));
3809 insertDAGNode(*CurDAG, X, AllOnesVec);
3811 AllOnesVec = CurDAG->getBitcast(VT, AllOnesVec);
3812 insertDAGNode(*CurDAG, X, AllOnesVec);
3814 unsigned NewOpcode = Node->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
3815 SDValue NewNode = CurDAG->getNode(NewOpcode, DL, VT, X, AllOnesVec);
3817 ReplaceNode(Node, NewNode.getNode());
3818 SelectCode(NewNode.getNode());
3819 return true;
3822 /// If the high bits of an 'and' operand are known zero, try setting the
3823 /// high bits of an 'and' constant operand to produce a smaller encoding by
3824 /// creating a small, sign-extended negative immediate rather than a large
3825 /// positive one. This reverses a transform in SimplifyDemandedBits that
3826 /// shrinks mask constants by clearing bits. There is also a possibility that
3827 /// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
3828 /// case, just replace the 'and'. Return 'true' if the node is replaced.
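// (e.g. when the top 8 bits of the operand are known zero, x & 0x00FFFFF0
// can become x & 0xFFFFFFF0, which encodes as the sign-extended 8-bit
// immediate -16.)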
3829 bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
3830 // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
3831 // have immediate operands.
3832 MVT VT = And->getSimpleValueType(0);
3833 if (VT != MVT::i32 && VT != MVT::i64)
3834 return false;
3836 auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
3837 if (!And1C)
3838 return false;
3840 // Bail out if the mask constant is already negative. It can't shrink any more.
3841 // If the upper 32 bits of a 64 bit mask are all zeros, we have special isel
3842 // patterns to use a 32-bit and instead of a 64-bit and by relying on the
3843 // implicit zeroing of 32 bit ops. So we should check if the lower 32 bits
3844 // are negative too.
3845 APInt MaskVal = And1C->getAPIntValue();
3846 unsigned MaskLZ = MaskVal.countLeadingZeros();
3847 if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
3848 return false;
3850 // Don't extend into the upper 32 bits of a 64 bit mask.
3851 if (VT == MVT::i64 && MaskLZ >= 32) {
3852 MaskLZ -= 32;
3853 MaskVal = MaskVal.trunc(32);
3856 SDValue And0 = And->getOperand(0);
3857 APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ);
3858 APInt NegMaskVal = MaskVal | HighZeros;
3860 // If a negative constant would not allow a smaller encoding, there's no need
3861 // to continue. Only change the constant when we know it's a win.
3862 unsigned MinWidth = NegMaskVal.getMinSignedBits();
3863 if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getMinSignedBits() <= 32))
3864 return false;
3866 // Extend masks if we truncated above.
3867 if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) {
3868 NegMaskVal = NegMaskVal.zext(64);
3869 HighZeros = HighZeros.zext(64);
3872 // The variable operand must be all zeros in the top bits to allow using the
3873 // new, negative constant as the mask.
3874 if (!CurDAG->MaskedValueIsZero(And0, HighZeros))
3875 return false;
3877 // Check if the mask is -1. In that case, this is an unnecessary instruction
3878 // that escaped earlier analysis.
3879 if (NegMaskVal.isAllOnesValue()) {
3880 ReplaceNode(And, And0.getNode());
3881 return true;
3884 // A negative mask allows a smaller encoding. Create a new 'and' node.
3885 SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT);
3886 SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask);
3887 ReplaceNode(And, NewAnd.getNode());
3888 SelectCode(NewAnd.getNode());
3889 return true;
3892 static unsigned getVPTESTMOpc(MVT TestVT, bool IsTestN, bool FoldedLoad,
3893 bool FoldedBCast, bool Masked) {
3894 if (Masked) {
3895 if (FoldedLoad) {
3896 switch (TestVT.SimpleTy) {
3897 default: llvm_unreachable("Unexpected VT!");
3898 case MVT::v16i8:
3899 return IsTestN ? X86::VPTESTNMBZ128rmk : X86::VPTESTMBZ128rmk;
3900 case MVT::v8i16:
3901 return IsTestN ? X86::VPTESTNMWZ128rmk : X86::VPTESTMWZ128rmk;
3902 case MVT::v4i32:
3903 return IsTestN ? X86::VPTESTNMDZ128rmk : X86::VPTESTMDZ128rmk;
3904 case MVT::v2i64:
3905 return IsTestN ? X86::VPTESTNMQZ128rmk : X86::VPTESTMQZ128rmk;
3906 case MVT::v32i8:
3907 return IsTestN ? X86::VPTESTNMBZ256rmk : X86::VPTESTMBZ256rmk;
3908 case MVT::v16i16:
3909 return IsTestN ? X86::VPTESTNMWZ256rmk : X86::VPTESTMWZ256rmk;
3910 case MVT::v8i32:
3911 return IsTestN ? X86::VPTESTNMDZ256rmk : X86::VPTESTMDZ256rmk;
3912 case MVT::v4i64:
3913 return IsTestN ? X86::VPTESTNMQZ256rmk : X86::VPTESTMQZ256rmk;
3914 case MVT::v64i8:
3915 return IsTestN ? X86::VPTESTNMBZrmk : X86::VPTESTMBZrmk;
3916 case MVT::v32i16:
3917 return IsTestN ? X86::VPTESTNMWZrmk : X86::VPTESTMWZrmk;
3918 case MVT::v16i32:
3919 return IsTestN ? X86::VPTESTNMDZrmk : X86::VPTESTMDZrmk;
3920 case MVT::v8i64:
3921 return IsTestN ? X86::VPTESTNMQZrmk : X86::VPTESTMQZrmk;
3925 if (FoldedBCast) {
3926 switch (TestVT.SimpleTy) {
3927 default: llvm_unreachable("Unexpected VT!");
3928 case MVT::v4i32:
3929 return IsTestN ? X86::VPTESTNMDZ128rmbk : X86::VPTESTMDZ128rmbk;
3930 case MVT::v2i64:
3931 return IsTestN ? X86::VPTESTNMQZ128rmbk : X86::VPTESTMQZ128rmbk;
3932 case MVT::v8i32:
3933 return IsTestN ? X86::VPTESTNMDZ256rmbk : X86::VPTESTMDZ256rmbk;
3934 case MVT::v4i64:
3935 return IsTestN ? X86::VPTESTNMQZ256rmbk : X86::VPTESTMQZ256rmbk;
3936 case MVT::v16i32:
3937 return IsTestN ? X86::VPTESTNMDZrmbk : X86::VPTESTMDZrmbk;
3938 case MVT::v8i64:
3939 return IsTestN ? X86::VPTESTNMQZrmbk : X86::VPTESTMQZrmbk;
3943 switch (TestVT.SimpleTy) {
3944 default: llvm_unreachable("Unexpected VT!");
3945 case MVT::v16i8:
3946 return IsTestN ? X86::VPTESTNMBZ128rrk : X86::VPTESTMBZ128rrk;
3947 case MVT::v8i16:
3948 return IsTestN ? X86::VPTESTNMWZ128rrk : X86::VPTESTMWZ128rrk;
3949 case MVT::v4i32:
3950 return IsTestN ? X86::VPTESTNMDZ128rrk : X86::VPTESTMDZ128rrk;
3951 case MVT::v2i64:
3952 return IsTestN ? X86::VPTESTNMQZ128rrk : X86::VPTESTMQZ128rrk;
3953 case MVT::v32i8:
3954 return IsTestN ? X86::VPTESTNMBZ256rrk : X86::VPTESTMBZ256rrk;
3955 case MVT::v16i16:
3956 return IsTestN ? X86::VPTESTNMWZ256rrk : X86::VPTESTMWZ256rrk;
3957 case MVT::v8i32:
3958 return IsTestN ? X86::VPTESTNMDZ256rrk : X86::VPTESTMDZ256rrk;
3959 case MVT::v4i64:
3960 return IsTestN ? X86::VPTESTNMQZ256rrk : X86::VPTESTMQZ256rrk;
3961 case MVT::v64i8:
3962 return IsTestN ? X86::VPTESTNMBZrrk : X86::VPTESTMBZrrk;
3963 case MVT::v32i16:
3964 return IsTestN ? X86::VPTESTNMWZrrk : X86::VPTESTMWZrrk;
3965 case MVT::v16i32:
3966 return IsTestN ? X86::VPTESTNMDZrrk : X86::VPTESTMDZrrk;
3967 case MVT::v8i64:
3968 return IsTestN ? X86::VPTESTNMQZrrk : X86::VPTESTMQZrrk;
3972 if (FoldedLoad) {
3973 switch (TestVT.SimpleTy) {
3974 default: llvm_unreachable("Unexpected VT!");
3975 case MVT::v16i8:
3976 return IsTestN ? X86::VPTESTNMBZ128rm : X86::VPTESTMBZ128rm;
3977 case MVT::v8i16:
3978 return IsTestN ? X86::VPTESTNMWZ128rm : X86::VPTESTMWZ128rm;
3979 case MVT::v4i32:
3980 return IsTestN ? X86::VPTESTNMDZ128rm : X86::VPTESTMDZ128rm;
3981 case MVT::v2i64:
3982 return IsTestN ? X86::VPTESTNMQZ128rm : X86::VPTESTMQZ128rm;
3983 case MVT::v32i8:
3984 return IsTestN ? X86::VPTESTNMBZ256rm : X86::VPTESTMBZ256rm;
3985 case MVT::v16i16:
3986 return IsTestN ? X86::VPTESTNMWZ256rm : X86::VPTESTMWZ256rm;
3987 case MVT::v8i32:
3988 return IsTestN ? X86::VPTESTNMDZ256rm : X86::VPTESTMDZ256rm;
3989 case MVT::v4i64:
3990 return IsTestN ? X86::VPTESTNMQZ256rm : X86::VPTESTMQZ256rm;
3991 case MVT::v64i8:
3992 return IsTestN ? X86::VPTESTNMBZrm : X86::VPTESTMBZrm;
3993 case MVT::v32i16:
3994 return IsTestN ? X86::VPTESTNMWZrm : X86::VPTESTMWZrm;
3995 case MVT::v16i32:
3996 return IsTestN ? X86::VPTESTNMDZrm : X86::VPTESTMDZrm;
3997 case MVT::v8i64:
3998 return IsTestN ? X86::VPTESTNMQZrm : X86::VPTESTMQZrm;
4002 if (FoldedBCast) {
4003 switch (TestVT.SimpleTy) {
4004 default: llvm_unreachable("Unexpected VT!");
4005 case MVT::v4i32:
4006 return IsTestN ? X86::VPTESTNMDZ128rmb : X86::VPTESTMDZ128rmb;
4007 case MVT::v2i64:
4008 return IsTestN ? X86::VPTESTNMQZ128rmb : X86::VPTESTMQZ128rmb;
4009 case MVT::v8i32:
4010 return IsTestN ? X86::VPTESTNMDZ256rmb : X86::VPTESTMDZ256rmb;
4011 case MVT::v4i64:
4012 return IsTestN ? X86::VPTESTNMQZ256rmb : X86::VPTESTMQZ256rmb;
4013 case MVT::v16i32:
4014 return IsTestN ? X86::VPTESTNMDZrmb : X86::VPTESTMDZrmb;
4015 case MVT::v8i64:
4016 return IsTestN ? X86::VPTESTNMQZrmb : X86::VPTESTMQZrmb;
4020 switch (TestVT.SimpleTy) {
4021 default: llvm_unreachable("Unexpected VT!");
4022 case MVT::v16i8:
4023 return IsTestN ? X86::VPTESTNMBZ128rr : X86::VPTESTMBZ128rr;
4024 case MVT::v8i16:
4025 return IsTestN ? X86::VPTESTNMWZ128rr : X86::VPTESTMWZ128rr;
4026 case MVT::v4i32:
4027 return IsTestN ? X86::VPTESTNMDZ128rr : X86::VPTESTMDZ128rr;
4028 case MVT::v2i64:
4029 return IsTestN ? X86::VPTESTNMQZ128rr : X86::VPTESTMQZ128rr;
4030 case MVT::v32i8:
4031 return IsTestN ? X86::VPTESTNMBZ256rr : X86::VPTESTMBZ256rr;
4032 case MVT::v16i16:
4033 return IsTestN ? X86::VPTESTNMWZ256rr : X86::VPTESTMWZ256rr;
4034 case MVT::v8i32:
4035 return IsTestN ? X86::VPTESTNMDZ256rr : X86::VPTESTMDZ256rr;
4036 case MVT::v4i64:
4037 return IsTestN ? X86::VPTESTNMQZ256rr : X86::VPTESTMQZ256rr;
4038 case MVT::v64i8:
4039 return IsTestN ? X86::VPTESTNMBZrr : X86::VPTESTMBZrr;
4040 case MVT::v32i16:
4041 return IsTestN ? X86::VPTESTNMWZrr : X86::VPTESTMWZrr;
4042 case MVT::v16i32:
4043 return IsTestN ? X86::VPTESTNMDZrr : X86::VPTESTMDZrr;
4044 case MVT::v8i64:
4045 return IsTestN ? X86::VPTESTNMQZrr : X86::VPTESTMQZrr;
4049 // Try to create VPTESTM instruction. If InMask is not null, it will be used
4050 // to form a masked operation.
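// e.g. (setne (and X, Y), 0) maps to VPTESTM and (seteq (and X, Y), 0) maps
// to VPTESTNM; a non-null InMask selects the masked (rrk/rmk/rmbk)
// instruction forms.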
4051 bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
4052 SDValue InMask) {
4053 assert(Subtarget->hasAVX512() && "Expected AVX512!");
4054 assert(Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 &&
4055 "Unexpected VT!");
4057 // Look for equal and not equal compares.
4058 ISD::CondCode CC = cast<CondCodeSDNode>(Setcc.getOperand(2))->get();
4059 if (CC != ISD::SETEQ && CC != ISD::SETNE)
4060 return false;
4062 // See if we're comparing against zero. This should have been canonicalized
4063 // to RHS during lowering.
4064 if (!ISD::isBuildVectorAllZeros(Setcc.getOperand(1).getNode()))
4065 return false;
4067 SDValue N0 = Setcc.getOperand(0);
4069 MVT CmpVT = N0.getSimpleValueType();
4070 MVT CmpSVT = CmpVT.getVectorElementType();
4072 // Start with both operands the same. We'll try to refine this.
4073 SDValue Src0 = N0;
4074 SDValue Src1 = N0;
4077 // Look through single use bitcasts.
4078 SDValue N0Temp = N0;
4079 if (N0Temp.getOpcode() == ISD::BITCAST && N0Temp.hasOneUse())
4080 N0Temp = N0.getOperand(0);
4082 // Look for single use AND.
4083 if (N0Temp.getOpcode() == ISD::AND && N0Temp.hasOneUse()) {
4084 Src0 = N0Temp.getOperand(0);
4085 Src1 = N0Temp.getOperand(1);
4089 // Without VLX we need to widen the load.
4090 bool Widen = !Subtarget->hasVLX() && !CmpVT.is512BitVector();
4092 // We can only fold loads if the sources are unique.
4093 bool CanFoldLoads = Src0 != Src1;
4095 // Try to fold loads unless we need to widen.
4096 bool FoldedLoad = false;
4097 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Load;
4098 if (!Widen && CanFoldLoads) {
4099 Load = Src1;
4100 FoldedLoad = tryFoldLoad(Root, N0.getNode(), Load, Tmp0, Tmp1, Tmp2, Tmp3,
4101 Tmp4);
4102 if (!FoldedLoad) {
4103 // AND is commutative.
4104 Load = Src0;
4105 FoldedLoad = tryFoldLoad(Root, N0.getNode(), Load, Tmp0, Tmp1, Tmp2,
4106 Tmp3, Tmp4);
4107 if (FoldedLoad)
4108 std::swap(Src0, Src1);
4112 auto findBroadcastedOp = [](SDValue Src, MVT CmpSVT, SDNode *&Parent) {
4113 // Look through single use bitcasts.
4114 if (Src.getOpcode() == ISD::BITCAST && Src.hasOneUse())
4115 Src = Src.getOperand(0);
4117 if (Src.getOpcode() == X86ISD::VBROADCAST && Src.hasOneUse()) {
4118 Parent = Src.getNode();
4119 Src = Src.getOperand(0);
4120 if (Src.getSimpleValueType() == CmpSVT)
4121 return Src;
4124 return SDValue();
4127 // If we didn't fold a load, try to match a broadcast. There is no widening
4128 // limitation for this, but only i32 and i64 element types are supported.
4129 bool FoldedBCast = false;
4130 if (!FoldedLoad && CanFoldLoads &&
4131 (CmpSVT == MVT::i32 || CmpSVT == MVT::i64)) {
4132 SDNode *ParentNode = nullptr;
4133 if ((Load = findBroadcastedOp(Src1, CmpSVT, ParentNode))) {
4134 FoldedBCast = tryFoldLoad(Root, ParentNode, Load, Tmp0,
4135 Tmp1, Tmp2, Tmp3, Tmp4);
4138 // Try the other operand.
4139 if (!FoldedBCast) {
4140 if ((Load = findBroadcastedOp(Src0, CmpSVT, ParentNode))) {
4141 FoldedBCast = tryFoldLoad(Root, ParentNode, Load, Tmp0,
4142 Tmp1, Tmp2, Tmp3, Tmp4);
4143 if (FoldedBCast)
4144 std::swap(Src0, Src1);
4149 auto getMaskRC = [](MVT MaskVT) {
4150 switch (MaskVT.SimpleTy) {
4151 default: llvm_unreachable("Unexpected VT!");
4152 case MVT::v2i1: return X86::VK2RegClassID;
4153 case MVT::v4i1: return X86::VK4RegClassID;
4154 case MVT::v8i1: return X86::VK8RegClassID;
4155 case MVT::v16i1: return X86::VK16RegClassID;
4156 case MVT::v32i1: return X86::VK32RegClassID;
4157 case MVT::v64i1: return X86::VK64RegClassID;
4161 bool IsMasked = InMask.getNode() != nullptr;
4163 SDLoc dl(Root);
4165 MVT ResVT = Setcc.getSimpleValueType();
4166 MVT MaskVT = ResVT;
4167 if (Widen) {
4168 // Widen the inputs using insert_subreg or copy_to_regclass.
4169 unsigned Scale = CmpVT.is128BitVector() ? 4 : 2;
4170 unsigned SubReg = CmpVT.is128BitVector() ? X86::sub_xmm : X86::sub_ymm;
4171 unsigned NumElts = CmpVT.getVectorNumElements() * Scale;
4172 CmpVT = MVT::getVectorVT(CmpSVT, NumElts);
4173 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
4174 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, dl,
4175 CmpVT), 0);
4176 Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0);
4178 assert(!FoldedLoad && "Shouldn't have folded the load");
4179 if (!FoldedBCast)
4180 Src1 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src1);
4182 if (IsMasked) {
4183 // Widen the mask.
4184 unsigned RegClass = getMaskRC(MaskVT);
4185 SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4186 InMask = SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4187 dl, MaskVT, InMask, RC), 0);
4191 bool IsTestN = CC == ISD::SETEQ;
4192 unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast,
4193 IsMasked);
4195 MachineSDNode *CNode;
4196 if (FoldedLoad || FoldedBCast) {
4197 SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other);
4199 if (IsMasked) {
4200 SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4201 Load.getOperand(0) };
4202 CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4203 } else {
4204 SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4205 Load.getOperand(0) };
4206 CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4209 // Update the chain.
4210 ReplaceUses(Load.getValue(1), SDValue(CNode, 1));
4211 // Record the mem-refs
4212 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(Load)->getMemOperand()});
4213 } else {
4214 if (IsMasked)
4215 CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1);
4216 else
4217 CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, Src0, Src1);
4220 // If we widened, we need to shrink the mask VT.
4221 if (Widen) {
4222 unsigned RegClass = getMaskRC(ResVT);
4223 SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4224 CNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4225 dl, ResVT, SDValue(CNode, 0), RC);
4228 ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0));
4229 CurDAG->RemoveDeadNode(Root);
4230 return true;
4233 void X86DAGToDAGISel::Select(SDNode *Node) {
4234 MVT NVT = Node->getSimpleValueType(0);
4235 unsigned Opcode = Node->getOpcode();
4236 SDLoc dl(Node);
4238 if (Node->isMachineOpcode()) {
4239 LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
4240 Node->setNodeId(-1);
4241 return; // Already selected.
4244 switch (Opcode) {
4245 default: break;
4246 case ISD::INTRINSIC_VOID: {
4247 unsigned IntNo = Node->getConstantOperandVal(1);
4248 switch (IntNo) {
4249 default: break;
4250 case Intrinsic::x86_sse3_monitor:
4251 case Intrinsic::x86_monitorx:
4252 case Intrinsic::x86_clzero: {
4253 bool Use64BitPtr = Node->getOperand(2).getValueType() == MVT::i64;
4255 unsigned Opc = 0;
4256 switch (IntNo) {
4257 default: llvm_unreachable("Unexpected intrinsic!");
4258 case Intrinsic::x86_sse3_monitor:
4259 if (!Subtarget->hasSSE3())
4260 break;
4261 Opc = Use64BitPtr ? X86::MONITOR64rrr : X86::MONITOR32rrr;
4262 break;
4263 case Intrinsic::x86_monitorx:
4264 if (!Subtarget->hasMWAITX())
4265 break;
4266 Opc = Use64BitPtr ? X86::MONITORX64rrr : X86::MONITORX32rrr;
4267 break;
4268 case Intrinsic::x86_clzero:
4269 if (!Subtarget->hasCLZERO())
4270 break;
4271 Opc = Use64BitPtr ? X86::CLZERO64r : X86::CLZERO32r;
4272 break;
4275 if (Opc) {
4276 unsigned PtrReg = Use64BitPtr ? X86::RAX : X86::EAX;
4277 SDValue Chain = CurDAG->getCopyToReg(Node->getOperand(0), dl, PtrReg,
4278 Node->getOperand(2), SDValue());
4279 SDValue InFlag = Chain.getValue(1);
4281 if (IntNo == Intrinsic::x86_sse3_monitor ||
4282 IntNo == Intrinsic::x86_monitorx) {
4283 // Copy the other two operands to ECX and EDX.
4284 Chain = CurDAG->getCopyToReg(Chain, dl, X86::ECX, Node->getOperand(3),
4285 InFlag);
4286 InFlag = Chain.getValue(1);
4287 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EDX, Node->getOperand(4),
4288 InFlag);
4289 InFlag = Chain.getValue(1);
4292 MachineSDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
4293 { Chain, InFlag});
4294 ReplaceNode(Node, CNode);
4295 return;
4300 break;
4302 case ISD::BRIND: {
4303 if (Subtarget->isTargetNaCl())
4304 // NaCl has its own pass where jmp %r32 is converted to jmp %r64. We
4305 // leave the instruction alone.
4306 break;
4307 if (Subtarget->isTarget64BitILP32()) {
4308 // Converts a 32-bit register to a 64-bit, zero-extended version of
4309 // it. This is needed because x86-64 can do many things, but jmp %r32
4310 // ain't one of them.
4311 const SDValue &Target = Node->getOperand(1);
4312 assert(Target.getSimpleValueType() == llvm::MVT::i32);
4313 SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, EVT(MVT::i64));
4314 SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other,
4315 Node->getOperand(0), ZextTarget);
4316 ReplaceNode(Node, Brind.getNode());
4317 SelectCode(ZextTarget.getNode());
4318 SelectCode(Brind.getNode());
4319 return;
4321 break;
4323 case X86ISD::GlobalBaseReg:
4324 ReplaceNode(Node, getGlobalBaseReg());
4325 return;
4327 case ISD::BITCAST:
4328 // Just drop all 128/256/512-bit bitcasts.
4329 if (NVT.is512BitVector() || NVT.is256BitVector() || NVT.is128BitVector() ||
4330 NVT == MVT::f128) {
4331 ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
4332 CurDAG->RemoveDeadNode(Node);
4333 return;
4335 break;
4337 case ISD::VSELECT: {
4338 // Replace VSELECT with non-mask conditions with BLENDV.
4339 if (Node->getOperand(0).getValueType().getVectorElementType() == MVT::i1)
4340 break;
4342 assert(Subtarget->hasSSE41() && "Expected SSE4.1 support!");
4343 SDValue Blendv = CurDAG->getNode(
4344 X86ISD::BLENDV, SDLoc(Node), Node->getValueType(0), Node->getOperand(0),
4345 Node->getOperand(1), Node->getOperand(2));
4346 ReplaceNode(Node, Blendv.getNode());
4347 SelectCode(Blendv.getNode());
4348 // We already called ReplaceUses.
4349 return;
4352 case ISD::SRL:
4353 if (matchBitExtract(Node))
4354 return;
4355 LLVM_FALLTHROUGH;
4356 case ISD::SRA:
4357 case ISD::SHL:
4358 if (tryShiftAmountMod(Node))
4359 return;
4360 break;
4362 case ISD::AND:
4363 if (NVT.isVector() && NVT.getVectorElementType() == MVT::i1) {
4364 // Try to form a masked VPTESTM. Operands can be in either order.
4365 SDValue N0 = Node->getOperand(0);
4366 SDValue N1 = Node->getOperand(1);
4367 if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() &&
4368 tryVPTESTM(Node, N0, N1))
4369 return;
4370 if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
4371 tryVPTESTM(Node, N1, N0))
4372 return;
4375 if (MachineSDNode *NewNode = matchBEXTRFromAndImm(Node)) {
4376 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
4377 CurDAG->RemoveDeadNode(Node);
4378 return;
4380 if (matchBitExtract(Node))
4381 return;
4382 if (AndImmShrink && shrinkAndImmediate(Node))
4383 return;
4385 LLVM_FALLTHROUGH;
4386 case ISD::OR:
4387 case ISD::XOR:
4388 if (tryShrinkShlLogicImm(Node))
4389 return;
4391 LLVM_FALLTHROUGH;
4392 case ISD::ADD:
4393 case ISD::SUB: {
4394 if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && NVT.isVector() &&
4395 combineIncDecVector(Node))
4396 return;
4398 // Try to avoid folding immediates with multiple uses for optsize.
4399 // This code tries to select to register form directly to avoid going
4400 // through the isel table which might fold the immediate. We can't change
4401 // the add/sub/and/or/xor with immediate patterns in the
4402 // tablegen files to check immediate use count without making the patterns
4403 // unavailable to the fast-isel table.
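// e.g. at minsize, if several adds share a 4-byte immediate such as
// 0x12345678 (a hypothetical value), materializing it once in a register and
// using ADD32rr is smaller than encoding the immediate in every instruction.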
4404 if (!OptForSize)
4405 break;
4407 // Only handle i8/i16/i32/i64.
4408 if (NVT != MVT::i8 && NVT != MVT::i16 && NVT != MVT::i32 && NVT != MVT::i64)
4409 break;
4411 SDValue N0 = Node->getOperand(0);
4412 SDValue N1 = Node->getOperand(1);
4414 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
4415 if (!Cst)
4416 break;
4418 int64_t Val = Cst->getSExtValue();
4420 // Make sure it's an immediate that is considered foldable.
4421 // FIXME: Handle unsigned 32 bit immediates for 64-bit AND.
4422 if (!isInt<8>(Val) && !isInt<32>(Val))
4423 break;
4425 // If this can match to INC/DEC, let it go.
4426 if (Opcode == ISD::ADD && (Val == 1 || Val == -1))
4427 break;
4429 // Check if we should avoid folding this immediate.
4430 if (!shouldAvoidImmediateInstFormsForSize(N1.getNode()))
4431 break;
4433 // We should not fold the immediate. So we need a register form instead.
4434 unsigned ROpc, MOpc;
4435 switch (NVT.SimpleTy) {
4436 default: llvm_unreachable("Unexpected VT!");
4437 case MVT::i8:
4438 switch (Opcode) {
4439 default: llvm_unreachable("Unexpected opcode!");
4440 case ISD::ADD: ROpc = X86::ADD8rr; MOpc = X86::ADD8rm; break;
4441 case ISD::SUB: ROpc = X86::SUB8rr; MOpc = X86::SUB8rm; break;
4442 case ISD::AND: ROpc = X86::AND8rr; MOpc = X86::AND8rm; break;
4443 case ISD::OR: ROpc = X86::OR8rr; MOpc = X86::OR8rm; break;
4444 case ISD::XOR: ROpc = X86::XOR8rr; MOpc = X86::XOR8rm; break;
4446 break;
4447 case MVT::i16:
4448 switch (Opcode) {
4449 default: llvm_unreachable("Unexpected opcode!");
4450 case ISD::ADD: ROpc = X86::ADD16rr; MOpc = X86::ADD16rm; break;
4451 case ISD::SUB: ROpc = X86::SUB16rr; MOpc = X86::SUB16rm; break;
4452 case ISD::AND: ROpc = X86::AND16rr; MOpc = X86::AND16rm; break;
4453 case ISD::OR: ROpc = X86::OR16rr; MOpc = X86::OR16rm; break;
4454 case ISD::XOR: ROpc = X86::XOR16rr; MOpc = X86::XOR16rm; break;
4456 break;
4457 case MVT::i32:
4458 switch (Opcode) {
4459 default: llvm_unreachable("Unexpected opcode!");
4460 case ISD::ADD: ROpc = X86::ADD32rr; MOpc = X86::ADD32rm; break;
4461 case ISD::SUB: ROpc = X86::SUB32rr; MOpc = X86::SUB32rm; break;
4462 case ISD::AND: ROpc = X86::AND32rr; MOpc = X86::AND32rm; break;
4463 case ISD::OR: ROpc = X86::OR32rr; MOpc = X86::OR32rm; break;
4464 case ISD::XOR: ROpc = X86::XOR32rr; MOpc = X86::XOR32rm; break;
4466 break;
4467 case MVT::i64:
4468 switch (Opcode) {
4469 default: llvm_unreachable("Unexpected opcode!");
4470 case ISD::ADD: ROpc = X86::ADD64rr; MOpc = X86::ADD64rm; break;
4471 case ISD::SUB: ROpc = X86::SUB64rr; MOpc = X86::SUB64rm; break;
4472 case ISD::AND: ROpc = X86::AND64rr; MOpc = X86::AND64rm; break;
4473 case ISD::OR: ROpc = X86::OR64rr; MOpc = X86::OR64rm; break;
4474 case ISD::XOR: ROpc = X86::XOR64rr; MOpc = X86::XOR64rm; break;
4476 break;
4479 // OK, this is an AND/OR/XOR/ADD/SUB with a constant.
4481 // If this is not a subtract, we can still try to fold a load.
4482 if (Opcode != ISD::SUB) {
4483 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4484 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4485 SDValue Ops[] = { N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
4486 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
4487 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4488 // Update the chain.
4489 ReplaceUses(N0.getValue(1), SDValue(CNode, 2));
4490 // Record the mem-refs
4491 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N0)->getMemOperand()});
4492 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
4493 CurDAG->RemoveDeadNode(Node);
4494 return;
4498 CurDAG->SelectNodeTo(Node, ROpc, NVT, MVT::i32, N0, N1);
4499 return;
4502 case X86ISD::SMUL:
4503 // i16/i32/i64 are handled with isel patterns.
4504 if (NVT != MVT::i8)
4505 break;
4506 LLVM_FALLTHROUGH;
4507 case X86ISD::UMUL: {
4508 SDValue N0 = Node->getOperand(0);
4509 SDValue N1 = Node->getOperand(1);
4511 unsigned LoReg, ROpc, MOpc;
4512 switch (NVT.SimpleTy) {
4513 default: llvm_unreachable("Unsupported VT!");
4514 case MVT::i8:
4515 LoReg = X86::AL;
4516 ROpc = Opcode == X86ISD::SMUL ? X86::IMUL8r : X86::MUL8r;
4517 MOpc = Opcode == X86ISD::SMUL ? X86::IMUL8m : X86::MUL8m;
4518 break;
4519 case MVT::i16:
4520 LoReg = X86::AX;
4521 ROpc = X86::MUL16r;
4522 MOpc = X86::MUL16m;
4523 break;
4524 case MVT::i32:
4525 LoReg = X86::EAX;
4526 ROpc = X86::MUL32r;
4527 MOpc = X86::MUL32m;
4528 break;
4529 case MVT::i64:
4530 LoReg = X86::RAX;
4531 ROpc = X86::MUL64r;
4532 MOpc = X86::MUL64m;
4533 break;
4536 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4537 bool FoldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4538 // Multiply is commutative.
4539 if (!FoldedLoad) {
4540 FoldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4541 if (FoldedLoad)
4542 std::swap(N0, N1);
4545 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
4546 N0, SDValue()).getValue(1);
4548 MachineSDNode *CNode;
4549 if (FoldedLoad) {
4550 // i16/i32/i64 use an instruction that produces a low and high result even
4551 // though only the low result is used.
4552 SDVTList VTs;
4553 if (NVT == MVT::i8)
4554 VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
4555 else
4556 VTs = CurDAG->getVTList(NVT, NVT, MVT::i32, MVT::Other);
4558 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4559 InFlag };
4560 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4562 // Update the chain.
4563 ReplaceUses(N1.getValue(1), SDValue(CNode, NVT == MVT::i8 ? 2 : 3));
4564 // Record the mem-refs
4565 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4566 } else {
4567 // i16/i32/i64 use an instruction that produces a low and high result even
4568 // though only the low result is used.
4569 SDVTList VTs;
4570 if (NVT == MVT::i8)
4571 VTs = CurDAG->getVTList(NVT, MVT::i32);
4572 else
4573 VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
4575 CNode = CurDAG->getMachineNode(ROpc, dl, VTs, {N1, InFlag});
4578 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
4579 ReplaceUses(SDValue(Node, 1), SDValue(CNode, NVT == MVT::i8 ? 1 : 2));
4580 CurDAG->RemoveDeadNode(Node);
4581 return;
4584 case ISD::SMUL_LOHI:
4585 case ISD::UMUL_LOHI: {
4586 SDValue N0 = Node->getOperand(0);
4587 SDValue N1 = Node->getOperand(1);
4589 unsigned Opc, MOpc;
4590 bool isSigned = Opcode == ISD::SMUL_LOHI;
4591 if (!isSigned) {
4592 switch (NVT.SimpleTy) {
4593 default: llvm_unreachable("Unsupported VT!");
4594 case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
4595 case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
4597 } else {
4598 switch (NVT.SimpleTy) {
4599 default: llvm_unreachable("Unsupported VT!");
4600 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
4601 case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
4605 unsigned SrcReg, LoReg, HiReg;
4606 switch (Opc) {
4607 default: llvm_unreachable("Unknown MUL opcode!");
4608 case X86::IMUL32r:
4609 case X86::MUL32r:
4610 SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
4611 break;
4612 case X86::IMUL64r:
4613 case X86::MUL64r:
4614 SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
4615 break;
4618 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4619 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4620 // Multiply is commutative.
4621 if (!foldedLoad) {
4622 foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4623 if (foldedLoad)
4624 std::swap(N0, N1);
4627 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
4628 N0, SDValue()).getValue(1);
4629 if (foldedLoad) {
4630 SDValue Chain;
4631 MachineSDNode *CNode = nullptr;
4632 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4633 InFlag };
4634 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
4635 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4636 Chain = SDValue(CNode, 0);
4637 InFlag = SDValue(CNode, 1);
4639 // Update the chain.
4640 ReplaceUses(N1.getValue(1), Chain);
4641 // Record the mem-refs
4642 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4643 } else {
4644 SDValue Ops[] = { N1, InFlag };
4645 SDVTList VTs = CurDAG->getVTList(MVT::Glue);
4646 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4647 InFlag = SDValue(CNode, 0);
4650 // Copy the low half of the result, if it is needed.
4651 if (!SDValue(Node, 0).use_empty()) {
4652 assert(LoReg && "Register for low half is not defined!");
4653 SDValue ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg,
4654 NVT, InFlag);
4655 InFlag = ResLo.getValue(2);
4656 ReplaceUses(SDValue(Node, 0), ResLo);
4657 LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG);
4658 dbgs() << '\n');
4660 // Copy the high half of the result, if it is needed.
4661 if (!SDValue(Node, 1).use_empty()) {
4662 assert(HiReg && "Register for high half is not defined!");
4663 SDValue ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg,
4664 NVT, InFlag);
4665 InFlag = ResHi.getValue(2);
4666 ReplaceUses(SDValue(Node, 1), ResHi);
4667 LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG);
4668 dbgs() << '\n');
4671 CurDAG->RemoveDeadNode(Node);
4672 return;
4675 case ISD::SDIVREM:
4676 case ISD::UDIVREM: {
4677 SDValue N0 = Node->getOperand(0);
4678 SDValue N1 = Node->getOperand(1);
4680 unsigned Opc, MOpc;
4681 bool isSigned = Opcode == ISD::SDIVREM;
4682 if (!isSigned) {
4683 switch (NVT.SimpleTy) {
4684 default: llvm_unreachable("Unsupported VT!");
4685 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
4686 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
4687 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
4688 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
4690 } else {
4691 switch (NVT.SimpleTy) {
4692 default: llvm_unreachable("Unsupported VT!");
4693 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
4694 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
4695 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
4696 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
4700 unsigned LoReg, HiReg, ClrReg;
4701 unsigned SExtOpcode;
4702 switch (NVT.SimpleTy) {
4703 default: llvm_unreachable("Unsupported VT!");
4704 case MVT::i8:
4705 LoReg = X86::AL; ClrReg = HiReg = X86::AH;
4706 SExtOpcode = X86::CBW;
4707 break;
4708 case MVT::i16:
4709 LoReg = X86::AX; HiReg = X86::DX;
4710 ClrReg = X86::DX;
4711 SExtOpcode = X86::CWD;
4712 break;
4713 case MVT::i32:
4714 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
4715 SExtOpcode = X86::CDQ;
4716 break;
4717 case MVT::i64:
4718 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
4719 SExtOpcode = X86::CQO;
4720 break;
4723 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4724 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4725 bool signBitIsZero = CurDAG->SignBitIsZero(N0);
4727 SDValue InFlag;
4728 if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
4729 // Special case for div8, just use a move with zero extension to AX to
4730 // clear the upper 8 bits (AH).
4731 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain;
4732 MachineSDNode *Move;
4733 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4734 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
4735 Move = CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
4736 MVT::Other, Ops);
4737 Chain = SDValue(Move, 1);
4738 ReplaceUses(N0.getValue(1), Chain);
4739 // Record the mem-refs
4740 CurDAG->setNodeMemRefs(Move, {cast<LoadSDNode>(N0)->getMemOperand()});
4741 } else {
4742 Move = CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0);
4743 Chain = CurDAG->getEntryNode();
4745 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, SDValue(Move, 0),
4746 SDValue());
4747 InFlag = Chain.getValue(1);
4748 } else {
4749 InFlag =
4750 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
4751 LoReg, N0, SDValue()).getValue(1);
4752 if (isSigned && !signBitIsZero) {
4753 // Sign extend the low part into the high part.
4754 InFlag =
4755 SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
4756 } else {
4757 // Zero out the high part, effectively zero extending the input.
4758 SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
4759 switch (NVT.SimpleTy) {
4760 case MVT::i16:
4761 ClrNode =
4762 SDValue(CurDAG->getMachineNode(
4763 TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
4764 CurDAG->getTargetConstant(X86::sub_16bit, dl,
4765 MVT::i32)),
4766 0);
4767 break;
4768 case MVT::i32:
4769 break;
4770 case MVT::i64:
4771 ClrNode =
4772 SDValue(CurDAG->getMachineNode(
4773 TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
4774 CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
4775 CurDAG->getTargetConstant(X86::sub_32bit, dl,
4776 MVT::i32)),
4777 0);
4778 break;
4779 default:
4780 llvm_unreachable("Unexpected division source");
4781 }
4783 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
4784 ClrNode, InFlag).getValue(1);
4785 }
4786 }
4788 if (foldedLoad) {
4789 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4790 InFlag };
4791 MachineSDNode *CNode =
4792 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
4793 InFlag = SDValue(CNode, 1);
4794 // Update the chain.
4795 ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
4796 // Record the mem-refs
4797 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4798 } else {
4799 InFlag =
4800 SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
4801 }
4803 // Prevent use of AH in a REX instruction by explicitly copying it to
4804 // an ABCD_L register.
4805 //
4806 // The current assumption of the register allocator is that isel
4807 // won't generate explicit references to the GR8_ABCD_H registers. If
4808 // the allocator and/or the backend get enhanced to be more robust in
4809 // that regard, this can be, and should be, removed.
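// (AH cannot be encoded in any instruction that carries a REX prefix, so a
// later use of the remainder alongside e.g. SIL/DIL or R8B-R15B would
// otherwise be unencodable.)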
4810 if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
4811 SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
4812 unsigned AHExtOpcode =
4813 isSigned ? X86::MOVSX32rr8_NOREX : X86::MOVZX32rr8_NOREX;
4815 SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
4816 MVT::Glue, AHCopy, InFlag);
4817 SDValue Result(RNode, 0);
4818 InFlag = SDValue(RNode, 1);
4820 Result =
4821 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
4823 ReplaceUses(SDValue(Node, 1), Result);
4824 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
4825 dbgs() << '\n');
4826 }
4827 // Copy the division (low) result, if it is needed.
4828 if (!SDValue(Node, 0).use_empty()) {
4829 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
4830 LoReg, NVT, InFlag);
4831 InFlag = Result.getValue(2);
4832 ReplaceUses(SDValue(Node, 0), Result);
4833 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
4834 dbgs() << '\n');
4835 }
4836 // Copy the remainder (high) result, if it is needed.
4837 if (!SDValue(Node, 1).use_empty()) {
4838 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
4839 HiReg, NVT, InFlag);
4840 InFlag = Result.getValue(2);
4841 ReplaceUses(SDValue(Node, 1), Result);
4842 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
4843 dbgs() << '\n');
4844 }
4845 CurDAG->RemoveDeadNode(Node);
4846 return;
4847 }
4849 case X86ISD::CMP: {
4850 SDValue N0 = Node->getOperand(0);
4851 SDValue N1 = Node->getOperand(1);
4853 // Optimizations for TEST compares.
4854 if (!isNullConstant(N1))
4855 break;
4857 // Save the original VT of the compare.
4858 MVT CmpVT = N0.getSimpleValueType();
4860 // If we are comparing (and (shr X, C), Mask) with 0, emit a BEXTR followed
4861 // by a test instruction. The test should be removed later by
4862 // analyzeCompare if we are using only the zero flag.
4863 // TODO: Should we check the users and use the BEXTR flags directly?
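// For example, testing (and (srl X, 8), 0xFFF) against zero can be selected
// as BEXTR with control word 0x0C08 (start bit 8, length 12) followed by a
// TEST of the extracted value.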
4864 if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
4865 if (MachineSDNode *NewNode = matchBEXTRFromAndImm(N0.getNode())) {
4866 unsigned TestOpc = CmpVT == MVT::i64 ? X86::TEST64rr
4867 : X86::TEST32rr;
4868 SDValue BEXTR = SDValue(NewNode, 0);
4869 NewNode = CurDAG->getMachineNode(TestOpc, dl, MVT::i32, BEXTR, BEXTR);
4870 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
4871 CurDAG->RemoveDeadNode(Node);
4872 return;
4873 }
4874 }
4876 // We can peek through truncates, but we need to be careful below.
4877 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
4878 N0 = N0.getOperand(0);
4880 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
4881 // use a smaller encoding.
4882 // Look past the truncate if CMP is the only use of it.
4883 if (N0.getOpcode() == ISD::AND &&
4884 N0.getNode()->hasOneUse() &&
4885 N0.getValueType() != MVT::i8) {
4886 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
4887 if (!C) break;
4888 uint64_t Mask = C->getZExtValue();
4890 // Check if we can replace AND+IMM64 with a shift. This is possible for
4891 // masks like 0xFF000000 or 0x00FFFFFF and if we care only about the zero
4892 // flag.
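// For example, (X & 0xFFFFFFFF00000000) == 0 can be tested as (X >> 32) == 0
// (the isMask_64(~Mask) branch below), and (X & 0x0000FFFFFFFFFFFF) == 0 as
// (X << 16) == 0 (the isMask_64(Mask) branch).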
4893 if (CmpVT == MVT::i64 && !isInt<32>(Mask) &&
4894 onlyUsesZeroFlag(SDValue(Node, 0))) {
4895 if (isMask_64(~Mask)) {
4896 unsigned TrailingZeros = countTrailingZeros(Mask);
4897 SDValue Imm = CurDAG->getTargetConstant(TrailingZeros, dl, MVT::i64);
4898 SDValue Shift =
4899 SDValue(CurDAG->getMachineNode(X86::SHR64ri, dl, MVT::i64, MVT::i32,
4900 N0.getOperand(0), Imm), 0);
4901 MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl,
4902 MVT::i32, Shift, Shift);
4903 ReplaceNode(Node, Test);
4904 return;
4905 }
4906 if (isMask_64(Mask)) {
4907 unsigned LeadingZeros = countLeadingZeros(Mask);
4908 SDValue Imm = CurDAG->getTargetConstant(LeadingZeros, dl, MVT::i64);
4909 SDValue Shift =
4910 SDValue(CurDAG->getMachineNode(X86::SHL64ri, dl, MVT::i64, MVT::i32,
4911 N0.getOperand(0), Imm), 0);
4912 MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl,
4913 MVT::i32, Shift, Shift);
4914 ReplaceNode(Node, Test);
4915 return;
4916 }
4917 }
4919 MVT VT;
4920 int SubRegOp;
4921 unsigned ROpc, MOpc;
4923 // For each of these checks we need to be careful if the sign flag is
4924 // being used. It is only safe to use the sign flag in two cases:
4925 // either the sign bit in the shrunken mask is zero or the final test
4926 // size is equal to the original compare size.
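// For example, narrowing testl $0x80, %eax to testb $0x80, %al changes SF
// (bit 7 becomes the sign bit of the 8-bit result), so the narrowing is only
// done when SF is known to be unused or the test width matches the compare
// width.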
4928 if (isUInt<8>(Mask) &&
4929 (!(Mask & 0x80) || CmpVT == MVT::i8 ||
4930 hasNoSignFlagUses(SDValue(Node, 0)))) {
4931 // For example, convert "testl %eax, $8" to "testb %al, $8"
4932 VT = MVT::i8;
4933 SubRegOp = X86::sub_8bit;
4934 ROpc = X86::TEST8ri;
4935 MOpc = X86::TEST8mi;
4936 } else if (OptForMinSize && isUInt<16>(Mask) &&
4937 (!(Mask & 0x8000) || CmpVT == MVT::i16 ||
4938 hasNoSignFlagUses(SDValue(Node, 0)))) {
4939 // For example, "testl %eax, $32776" to "testw %ax, $32776".
4940 // NOTE: We only want to form TESTW instructions if optimizing for
4941 // min size. Otherwise we only save one byte and possibly get a length
4942 // changing prefix penalty in the decoders.
4943 VT = MVT::i16;
4944 SubRegOp = X86::sub_16bit;
4945 ROpc = X86::TEST16ri;
4946 MOpc = X86::TEST16mi;
4947 } else if (isUInt<32>(Mask) && N0.getValueType() != MVT::i16 &&
4948 ((!(Mask & 0x80000000) &&
4949 // Without minsize 16-bit Cmps can get here so we need to
4950 // be sure we calculate the correct sign flag if needed.
4951 (CmpVT != MVT::i16 || !(Mask & 0x8000))) ||
4952 CmpVT == MVT::i32 ||
4953 hasNoSignFlagUses(SDValue(Node, 0)))) {
4954 // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
4955 // NOTE: We only want to run that transform if N0 is 32 or 64 bits.
4956 // Otherwise, we find ourselves in a position where we have to do
4957 // promotion. If previous passes did not promote the and, we assume
4958 // they had a good reason not to and do not promote here.
4959 VT = MVT::i32;
4960 SubRegOp = X86::sub_32bit;
4961 ROpc = X86::TEST32ri;
4962 MOpc = X86::TEST32mi;
4963 } else {
4964 // No eligible transformation was found.
4965 break;
4966 }
4968 SDValue Imm = CurDAG->getTargetConstant(Mask, dl, VT);
4969 SDValue Reg = N0.getOperand(0);
4971 // Emit a testl or testw.
4972 MachineSDNode *NewNode;
4973 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4974 if (tryFoldLoad(Node, N0.getNode(), Reg, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4975 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
4976 Reg.getOperand(0) };
4977 NewNode = CurDAG->getMachineNode(MOpc, dl, MVT::i32, MVT::Other, Ops);
4978 // Update the chain.
4979 ReplaceUses(Reg.getValue(1), SDValue(NewNode, 1));
4980 // Record the mem-refs
4981 CurDAG->setNodeMemRefs(NewNode,
4982 {cast<LoadSDNode>(Reg)->getMemOperand()});
4983 } else {
4984 // Extract the subregister if necessary.
4985 if (N0.getValueType() != VT)
4986 Reg = CurDAG->getTargetExtractSubreg(SubRegOp, dl, VT, Reg);
4988 NewNode = CurDAG->getMachineNode(ROpc, dl, MVT::i32, Reg, Imm);
4989 }
4990 // Replace CMP with TEST.
4991 ReplaceNode(Node, NewNode);
4992 return;
4993 }
4994 break;
4995 }
4996 case X86ISD::PCMPISTR: {
4997 if (!Subtarget->hasSSE42())
4998 break;
5000 bool NeedIndex = !SDValue(Node, 0).use_empty();
5001 bool NeedMask = !SDValue(Node, 1).use_empty();
5002 // We can't fold a load if we are going to make two instructions.
5003 bool MayFoldLoad = !NeedIndex || !NeedMask;
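// (If both the index and the mask are used, PCMPISTRI and PCMPISTRM are both
// emitted, and folding the load into each would duplicate the memory access.)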
5005 MachineSDNode *CNode;
5006 if (NeedMask) {
5007 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrr : X86::PCMPISTRMrr;
5008 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrm : X86::PCMPISTRMrm;
5009 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node);
5010 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
5011 }
5012 if (NeedIndex || !NeedMask) {
5013 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr : X86::PCMPISTRIrr;
5014 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrm : X86::PCMPISTRIrm;
5015 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node);
5016 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5017 }
5019 // Connect the flag usage to the last instruction created.
5020 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
5021 CurDAG->RemoveDeadNode(Node);
5022 return;
5023 }
5024 case X86ISD::PCMPESTR: {
5025 if (!Subtarget->hasSSE42())
5026 break;
5028 // Copy the two implicit register inputs.
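// PCMPESTRI/PCMPESTRM take the explicit lengths of the two string operands
// in EAX and EDX.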
5029 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EAX,
5030 Node->getOperand(1),
5031 SDValue()).getValue(1);
5032 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
5033 Node->getOperand(3), InFlag).getValue(1);
5035 bool NeedIndex = !SDValue(Node, 0).use_empty();
5036 bool NeedMask = !SDValue(Node, 1).use_empty();
5037 // We can't fold a load if we are going to make two instructions.
5038 bool MayFoldLoad = !NeedIndex || !NeedMask;
5040 MachineSDNode *CNode;
5041 if (NeedMask) {
5042 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrr : X86::PCMPESTRMrr;
5043 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrm : X86::PCMPESTRMrm;
5044 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node,
5045 InFlag);
5046 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
5047 }
5048 if (NeedIndex || !NeedMask) {
5049 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : X86::PCMPESTRIrr;
5050 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrm : X86::PCMPESTRIrm;
5051 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node, InFlag);
5052 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5053 }
5054 // Connect the flag usage to the last instruction created.
5055 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
5056 CurDAG->RemoveDeadNode(Node);
5057 return;
5058 }
5060 case ISD::SETCC: {
5061 if (NVT.isVector() && tryVPTESTM(Node, SDValue(Node, 0), SDValue()))
5062 return;
5064 break;
5065 }
5067 case ISD::STORE:
5068 if (foldLoadStoreIntoMemOperand(Node))
5069 return;
5070 break;
5071 case ISD::FCEIL:
5072 case ISD::FFLOOR:
5073 case ISD::FTRUNC:
5074 case ISD::FNEARBYINT:
5075 case ISD::FRINT: {
5076 // Replace fp rounding nodes with their X86-specific equivalents so we
5077 // don't need two sets of patterns.
5078 // FIXME: This can only happen when the nodes started as STRICT_* and have
5079 // been mutated into their non-STRICT equivalents. Eventually this
5080 // mutation will be removed and we should switch the STRICT_ nodes to a
5081 // strict version of RNDSCALE in PreProcessISelDAG.
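// The RNDSCALE immediate encodes the rounding behaviour: bit 3 suppresses
// precision exceptions, bit 2 selects the MXCSR rounding mode, and bits 1:0
// give an explicit mode (0 = nearest, 1 = down, 2 = up, 3 = toward zero).
// This is why FRINT (0x4) differs from FNEARBYINT (0xC) only in reporting
// the inexact exception.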
5082 unsigned Imm;
5083 switch (Node->getOpcode()) {
5084 default: llvm_unreachable("Unexpected opcode!");
5085 case ISD::FCEIL: Imm = 0xA; break;
5086 case ISD::FFLOOR: Imm = 0x9; break;
5087 case ISD::FTRUNC: Imm = 0xB; break;
5088 case ISD::FNEARBYINT: Imm = 0xC; break;
5089 case ISD::FRINT: Imm = 0x4; break;
5090 }
5091 SDLoc dl(Node);
5092 SDValue Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl,
5093 Node->getValueType(0),
5094 Node->getOperand(0),
5095 CurDAG->getConstant(Imm, dl, MVT::i8));
5096 ReplaceNode(Node, Res.getNode());
5097 SelectCode(Res.getNode());
5098 return;
5099 }
5100 }
5102 SelectCode(Node);
5103 }
5105 bool X86DAGToDAGISel::
5106 SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
5107 std::vector<SDValue> &OutOps) {
5108 SDValue Op0, Op1, Op2, Op3, Op4;
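// selectAddr fills in the five x86 memory-reference operands:
// base, scale, index, displacement and segment.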
5109 switch (ConstraintID) {
5110 default:
5111 llvm_unreachable("Unexpected asm memory constraint");
5112 case InlineAsm::Constraint_i:
5113 // FIXME: It seems strange that 'i' is needed here since it's supposed to
5114 // be an immediate and not a memory constraint.
5115 LLVM_FALLTHROUGH;
5116 case InlineAsm::Constraint_o: // offsetable ??
5117 case InlineAsm::Constraint_v: // not offsetable ??
5118 case InlineAsm::Constraint_m: // memory
5119 case InlineAsm::Constraint_X:
5120 if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
5121 return true;
5122 break;
5123 }
5125 OutOps.push_back(Op0);
5126 OutOps.push_back(Op1);
5127 OutOps.push_back(Op2);
5128 OutOps.push_back(Op3);
5129 OutOps.push_back(Op4);
5130 return false;
5131 }
5133 /// This pass converts a legalized DAG into an X86-specific DAG,
5134 /// ready for instruction scheduling.
5135 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
5136 CodeGenOpt::Level OptLevel) {
5137 return new X86DAGToDAGISel(TM, OptLevel);