[PowerPC] Convert r+r instructions to r+i (pre and post RA)
[llvm-core.git] / lib / Target / PowerPC / PPCISelDAGToDAG.cpp
blob cf5c3e8b5c6e563df640af933eed74836ea97962
1 //===-- PPCISelDAGToDAG.cpp - PPC --pattern matching inst selector --------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines a pattern matching instruction selector for PowerPC,
11 // converting from a legalized dag to a PPC dag.
13 //===----------------------------------------------------------------------===//
15 #include "MCTargetDesc/PPCMCTargetDesc.h"
16 #include "MCTargetDesc/PPCPredicates.h"
17 #include "PPC.h"
18 #include "PPCISelLowering.h"
19 #include "PPCMachineFunctionInfo.h"
20 #include "PPCSubtarget.h"
21 #include "PPCTargetMachine.h"
22 #include "llvm/ADT/APInt.h"
23 #include "llvm/ADT/DenseMap.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SmallPtrSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Analysis/BranchProbabilityInfo.h"
29 #include "llvm/CodeGen/FunctionLoweringInfo.h"
30 #include "llvm/CodeGen/ISDOpcodes.h"
31 #include "llvm/CodeGen/MachineBasicBlock.h"
32 #include "llvm/CodeGen/MachineFunction.h"
33 #include "llvm/CodeGen/MachineInstrBuilder.h"
34 #include "llvm/CodeGen/MachineRegisterInfo.h"
35 #include "llvm/CodeGen/MachineValueType.h"
36 #include "llvm/CodeGen/SelectionDAG.h"
37 #include "llvm/CodeGen/SelectionDAGISel.h"
38 #include "llvm/CodeGen/SelectionDAGNodes.h"
39 #include "llvm/CodeGen/TargetInstrInfo.h"
40 #include "llvm/CodeGen/TargetRegisterInfo.h"
41 #include "llvm/CodeGen/ValueTypes.h"
42 #include "llvm/IR/BasicBlock.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GlobalValue.h"
46 #include "llvm/IR/InlineAsm.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Module.h"
49 #include "llvm/Support/Casting.h"
50 #include "llvm/Support/CodeGen.h"
51 #include "llvm/Support/CommandLine.h"
52 #include "llvm/Support/Compiler.h"
53 #include "llvm/Support/Debug.h"
54 #include "llvm/Support/ErrorHandling.h"
55 #include "llvm/Support/KnownBits.h"
56 #include "llvm/Support/MathExtras.h"
57 #include "llvm/Support/raw_ostream.h"
58 #include <algorithm>
59 #include <cassert>
60 #include <cstdint>
61 #include <iterator>
62 #include <limits>
63 #include <memory>
64 #include <new>
65 #include <tuple>
66 #include <utility>
68 using namespace llvm;
70 #define DEBUG_TYPE "ppc-codegen"
72 STATISTIC(NumSextSetcc,
73 "Number of (sext(setcc)) nodes expanded into GPR sequence.");
74 STATISTIC(NumZextSetcc,
75 "Number of (zext(setcc)) nodes expanded into GPR sequence.");
76 STATISTIC(SignExtensionsAdded,
77 "Number of sign extensions for compare inputs added.");
78 STATISTIC(ZeroExtensionsAdded,
79 "Number of zero extensions for compare inputs added.");
80 STATISTIC(NumLogicOpsOnComparison,
81 "Number of logical ops on i1 values calculated in GPR.");
82 STATISTIC(OmittedForNonExtendUses,
83 "Number of compares not eliminated as they have non-extending uses.");
85 // FIXME: Remove this once the bug has been fixed!
86 cl::opt<bool> ANDIGlueBug("expose-ppc-andi-glue-bug",
87 cl::desc("expose the ANDI glue bug on PPC"), cl::Hidden);
89 static cl::opt<bool>
90 UseBitPermRewriter("ppc-use-bit-perm-rewriter", cl::init(true),
91 cl::desc("use aggressive ppc isel for bit permutations"),
92 cl::Hidden);
93 static cl::opt<bool> BPermRewriterNoMasking(
94 "ppc-bit-perm-rewriter-stress-rotates",
95 cl::desc("stress rotate selection in aggressive ppc isel for "
96 "bit permutations"),
97 cl::Hidden);
99 static cl::opt<bool> EnableBranchHint(
100 "ppc-use-branch-hint", cl::init(true),
101 cl::desc("Enable static hinting of branches on ppc"),
102 cl::Hidden);
104 enum ICmpInGPRType { ICGPR_All, ICGPR_None, ICGPR_I32, ICGPR_I64,
105 ICGPR_NonExtIn, ICGPR_Zext, ICGPR_Sext, ICGPR_ZextI32,
106 ICGPR_SextI32, ICGPR_ZextI64, ICGPR_SextI64 };
108 static cl::opt<ICmpInGPRType> CmpInGPR(
109 "ppc-gpr-icmps", cl::Hidden, cl::init(ICGPR_All),
110 cl::desc("Specify the types of comparisons to emit GPR-only code for."),
111 cl::values(clEnumValN(ICGPR_None, "none", "Do not modify integer comparisons."),
112 clEnumValN(ICGPR_All, "all", "All possible int comparisons in GPRs."),
113 clEnumValN(ICGPR_I32, "i32", "Only i32 comparisons in GPRs."),
114 clEnumValN(ICGPR_I64, "i64", "Only i64 comparisons in GPRs."),
115 clEnumValN(ICGPR_NonExtIn, "nonextin",
116 "Only comparisons where inputs don't need [sz]ext."),
117 clEnumValN(ICGPR_Zext, "zext", "Only comparisons with zext result."),
118 clEnumValN(ICGPR_ZextI32, "zexti32",
119 "Only i32 comparisons with zext result."),
120 clEnumValN(ICGPR_ZextI64, "zexti64",
121 "Only i64 comparisons with zext result."),
122 clEnumValN(ICGPR_Sext, "sext", "Only comparisons with sext result."),
123 clEnumValN(ICGPR_SextI32, "sexti32",
124 "Only i32 comparisons with sext result."),
125 clEnumValN(ICGPR_SextI64, "sexti64",
126 "Only i64 comparisons with sext result.")));
127 namespace {
129 //===--------------------------------------------------------------------===//
130 /// PPCDAGToDAGISel - PPC specific code to select PPC machine
131 /// instructions for SelectionDAG operations.
133 class PPCDAGToDAGISel : public SelectionDAGISel {
134 const PPCTargetMachine &TM;
135 const PPCSubtarget *PPCSubTarget;
136 const PPCTargetLowering *PPCLowering;
137 unsigned GlobalBaseReg;
139 public:
140 explicit PPCDAGToDAGISel(PPCTargetMachine &tm, CodeGenOpt::Level OptLevel)
141 : SelectionDAGISel(tm, OptLevel), TM(tm) {}
143 bool runOnMachineFunction(MachineFunction &MF) override {
144 // Make sure we re-emit a set of the global base reg if necessary
145 GlobalBaseReg = 0;
146 PPCSubTarget = &MF.getSubtarget<PPCSubtarget>();
147 PPCLowering = PPCSubTarget->getTargetLowering();
148 SelectionDAGISel::runOnMachineFunction(MF);
150 if (!PPCSubTarget->isSVR4ABI())
151 InsertVRSaveCode(MF);
153 return true;
156 void PreprocessISelDAG() override;
157 void PostprocessISelDAG() override;
159 /// getI16Imm - Return a target constant with the specified value, of type
160 /// i16.
161 inline SDValue getI16Imm(unsigned Imm, const SDLoc &dl) {
162 return CurDAG->getTargetConstant(Imm, dl, MVT::i16);
165 /// getI32Imm - Return a target constant with the specified value, of type
166 /// i32.
167 inline SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
168 return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
171 /// getI64Imm - Return a target constant with the specified value, of type
172 /// i64.
173 inline SDValue getI64Imm(uint64_t Imm, const SDLoc &dl) {
174 return CurDAG->getTargetConstant(Imm, dl, MVT::i64);
177 /// getSmallIPtrImm - Return a target constant of pointer type.
178 inline SDValue getSmallIPtrImm(unsigned Imm, const SDLoc &dl) {
179 return CurDAG->getTargetConstant(
180 Imm, dl, PPCLowering->getPointerTy(CurDAG->getDataLayout()));
183 /// isRotateAndMask - Returns true if Mask and Shift can be folded into a
184 /// single rotate-and-mask operation.
185 static bool isRotateAndMask(SDNode *N, unsigned Mask, bool isShiftMask,
186 unsigned &SH, unsigned &MB, unsigned &ME);
188 /// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
189 /// base register. Return the virtual register that holds this value.
190 SDNode *getGlobalBaseReg();
192 void selectFrameIndex(SDNode *SN, SDNode *N, unsigned Offset = 0);
194 // Select - Convert the specified operand from a target-independent to a
195 // target-specific node if it hasn't already been changed.
196 void Select(SDNode *N) override;
198 bool tryBitfieldInsert(SDNode *N);
199 bool tryBitPermutation(SDNode *N);
200 bool tryIntCompareInGPR(SDNode *N);
202 /// SelectCC - Select a comparison of the specified values with the
203 /// specified condition code, returning the CR# of the expression.
204 SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
205 const SDLoc &dl);
207 /// SelectAddrImm - Returns true if the address N can be represented by
208 /// a base register plus a signed 16-bit displacement [r+imm].
209 bool SelectAddrImm(SDValue N, SDValue &Disp,
210 SDValue &Base) {
211 return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, 0);
214 /// SelectAddrImmOffs - Return true if the operand is valid for a preinc
215 /// immediate field. Note that the operand at this point is already the
216 /// result of a prior SelectAddressRegImm call.
217 bool SelectAddrImmOffs(SDValue N, SDValue &Out) const {
218 if (N.getOpcode() == ISD::TargetConstant ||
219 N.getOpcode() == ISD::TargetGlobalAddress) {
220 Out = N;
221 return true;
224 return false;
227 /// SelectAddrIdx - Given the specified address, check to see if it can be
228 /// represented as an indexed [r+r] operation. Returns false if it can
229 /// be represented by [r+imm], which is preferred.
230 bool SelectAddrIdx(SDValue N, SDValue &Base, SDValue &Index) {
231 return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG);
234 /// SelectAddrIdxOnly - Given the specified address, force it to be
235 /// represented as an indexed [r+r] operation.
236 bool SelectAddrIdxOnly(SDValue N, SDValue &Base, SDValue &Index) {
237 return PPCLowering->SelectAddressRegRegOnly(N, Base, Index, *CurDAG);
240 /// SelectAddrImmX4 - Returns true if the address N can be represented by
241 /// a base register plus a signed 16-bit displacement that is a multiple of 4.
242 /// Suitable for use by STD and friends.
243 bool SelectAddrImmX4(SDValue N, SDValue &Disp, SDValue &Base) {
244 return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, 4);
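/// SelectAddrImmX16 - Returns true if the address N can be represented by
/// a base register plus a signed 16-bit displacement that is a multiple of
/// 16. Suitable for use by DQ-form memory instructions such as lxv/stxv.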
247 bool SelectAddrImmX16(SDValue N, SDValue &Disp, SDValue &Base) {
248 return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, 16);
251 // Select an address into a single register.
252 bool SelectAddr(SDValue N, SDValue &Base) {
253 Base = N;
254 return true;
257 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
258 /// inline asm expressions. It is always correct to compute the value into
259 /// a register. The case of adding a (possibly relocatable) constant to a
260 /// register can be improved, but it is wrong to substitute Reg+Reg for
261 /// Reg in an asm, because the load or store opcode would have to change.
262 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
263 unsigned ConstraintID,
264 std::vector<SDValue> &OutOps) override {
265 switch(ConstraintID) {
266 default:
267 errs() << "ConstraintID: " << ConstraintID << "\n";
268 llvm_unreachable("Unexpected asm memory constraint");
269 case InlineAsm::Constraint_es:
270 case InlineAsm::Constraint_i:
271 case InlineAsm::Constraint_m:
272 case InlineAsm::Constraint_o:
273 case InlineAsm::Constraint_Q:
274 case InlineAsm::Constraint_Z:
275 case InlineAsm::Constraint_Zy:
276 // We need to make sure that this one operand does not end up in r0
277 // (because we might end up lowering this as 0(%op)).
278 const TargetRegisterInfo *TRI = PPCSubTarget->getRegisterInfo();
279 const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF, /*Kind=*/1);
280 SDLoc dl(Op);
281 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
282 SDValue NewOp =
283 SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
284 dl, Op.getValueType(),
285 Op, RC), 0);
287 OutOps.push_back(NewOp);
288 return false;
290 return true;
293 void InsertVRSaveCode(MachineFunction &MF);
295 StringRef getPassName() const override {
296 return "PowerPC DAG->DAG Pattern Instruction Selection";
299 // Include the pieces autogenerated from the target description.
300 #include "PPCGenDAGISel.inc"
302 private:
303 bool trySETCC(SDNode *N);
305 void PeepholePPC64();
306 void PeepholePPC64ZExt();
307 void PeepholeCROps();
309 SDValue combineToCMPB(SDNode *N);
310 void foldBoolExts(SDValue &Res, SDNode *&N);
312 bool AllUsersSelectZero(SDNode *N);
313 void SwapAllSelectUsers(SDNode *N);
315 bool isOffsetMultipleOf(SDNode *N, unsigned Val) const;
316 void transferMemOperands(SDNode *N, SDNode *Result);
319 } // end anonymous namespace
321 /// InsertVRSaveCode - Once the entire function has been instruction selected,
322 /// all virtual registers are created and all machine instructions are built,
323 /// check to see if we need to save/restore VRSAVE. If so, do it.
324 void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
325 // Check to see if this function uses vector registers, which means we have to
326 // save and restore the VRSAVE register and update it with the regs we use.
328 // In this case, there will be virtual registers of vector type created
329 // by the scheduler. Detect them now.
330 bool HasVectorVReg = false;
331 for (unsigned i = 0, e = RegInfo->getNumVirtRegs(); i != e; ++i) {
332 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
333 if (RegInfo->getRegClass(Reg) == &PPC::VRRCRegClass) {
334 HasVectorVReg = true;
335 break;
338 if (!HasVectorVReg) return; // nothing to do.
340 // If we have a vector register, we want to emit code into the entry and exit
341 // blocks to save and restore the VRSAVE register. We do this here (instead
342 // of marking all vector instructions as clobbering VRSAVE) for two reasons:
344 // 1. This (trivially) reduces the load on the register allocator, by not
345 // having to represent the live range of the VRSAVE register.
346 // 2. This (more significantly) allows us to create a temporary virtual
347 // register to hold the saved VRSAVE value, allowing this temporary to be
348 // register allocated, instead of forcing it to be spilled to the stack.
350 // Create two vregs - one to hold the VRSAVE register that is live-in to the
351 // function and one for the value after having bits or'd into it.
352 unsigned InVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
353 unsigned UpdatedVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
355 const TargetInstrInfo &TII = *PPCSubTarget->getInstrInfo();
356 MachineBasicBlock &EntryBB = *Fn.begin();
357 DebugLoc dl;
358 // Emit the following code into the entry block:
359 // InVRSAVE = MFVRSAVE
360 // UpdatedVRSAVE = UPDATE_VRSAVE InVRSAVE
361 // MTVRSAVE UpdatedVRSAVE
362 MachineBasicBlock::iterator IP = EntryBB.begin(); // Insert Point
363 BuildMI(EntryBB, IP, dl, TII.get(PPC::MFVRSAVE), InVRSAVE);
364 BuildMI(EntryBB, IP, dl, TII.get(PPC::UPDATE_VRSAVE),
365 UpdatedVRSAVE).addReg(InVRSAVE);
366 BuildMI(EntryBB, IP, dl, TII.get(PPC::MTVRSAVE)).addReg(UpdatedVRSAVE);
368 // Find all return blocks, outputting a restore in each epilog.
369 for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
370 if (BB->isReturnBlock()) {
371 IP = BB->end(); --IP;
373 // Skip over all terminator instructions, which are part of the return
374 // sequence.
375 MachineBasicBlock::iterator I2 = IP;
376 while (I2 != BB->begin() && (--I2)->isTerminator())
377 IP = I2;
379 // Emit: MTVRSAVE InVRSave
380 BuildMI(*BB, IP, dl, TII.get(PPC::MTVRSAVE)).addReg(InVRSAVE);
385 /// getGlobalBaseReg - Output the instructions required to put the
386 /// base address to use for accessing globals into a register.
388 SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
389 if (!GlobalBaseReg) {
390 const TargetInstrInfo &TII = *PPCSubTarget->getInstrInfo();
391 // Insert the set of GlobalBaseReg into the first MBB of the function
392 MachineBasicBlock &FirstMBB = MF->front();
393 MachineBasicBlock::iterator MBBI = FirstMBB.begin();
394 const Module *M = MF->getFunction()->getParent();
395 DebugLoc dl;
397 if (PPCLowering->getPointerTy(CurDAG->getDataLayout()) == MVT::i32) {
398 if (PPCSubTarget->isTargetELF()) {
399 GlobalBaseReg = PPC::R30;
400 if (M->getPICLevel() == PICLevel::SmallPIC) {
401 BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MoveGOTtoLR));
402 BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
403 MF->getInfo<PPCFunctionInfo>()->setUsesPICBase(true);
404 } else {
405 BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR));
406 BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
407 unsigned TempReg = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
408 BuildMI(FirstMBB, MBBI, dl,
409 TII.get(PPC::UpdateGBR), GlobalBaseReg)
410 .addReg(TempReg, RegState::Define).addReg(GlobalBaseReg);
411 MF->getInfo<PPCFunctionInfo>()->setUsesPICBase(true);
413 } else {
414 GlobalBaseReg =
415 RegInfo->createVirtualRegister(&PPC::GPRC_and_GPRC_NOR0RegClass);
416 BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR));
417 BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
419 } else {
420 GlobalBaseReg = RegInfo->createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
421 BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR8));
422 BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR8), GlobalBaseReg);
425 return CurDAG->getRegister(GlobalBaseReg,
426 PPCLowering->getPointerTy(CurDAG->getDataLayout()))
427 .getNode();
430 /// isInt32Immediate - This method tests to see if the node is a 32-bit constant
431 /// operand. If so Imm will receive the 32-bit value.
432 static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
433 if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
434 Imm = cast<ConstantSDNode>(N)->getZExtValue();
435 return true;
437 return false;
440 /// isInt64Immediate - This method tests to see if the node is a 64-bit constant
441 /// operand. If so Imm will receive the 64-bit value.
442 static bool isInt64Immediate(SDNode *N, uint64_t &Imm) {
443 if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i64) {
444 Imm = cast<ConstantSDNode>(N)->getZExtValue();
445 return true;
447 return false;
450 // isInt32Immediate - This method tests to see if the value is a 32-bit
451 // constant operand. If so Imm will receive the 32-bit value.
452 static bool isInt32Immediate(SDValue N, unsigned &Imm) {
453 return isInt32Immediate(N.getNode(), Imm);
456 /// isInt64Immediate - This method tests to see if the value is a 64-bit
457 /// constant operand. If so Imm will receive the 64-bit value.
458 static bool isInt64Immediate(SDValue N, uint64_t &Imm) {
459 return isInt64Immediate(N.getNode(), Imm);
462 static unsigned getBranchHint(unsigned PCC, FunctionLoweringInfo *FuncInfo,
463 const SDValue &DestMBB) {
464 assert(isa<BasicBlockSDNode>(DestMBB));
466 if (!FuncInfo->BPI) return PPC::BR_NO_HINT;
468 const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
469 const TerminatorInst *BBTerm = BB->getTerminator();
471 if (BBTerm->getNumSuccessors() != 2) return PPC::BR_NO_HINT;
473 const BasicBlock *TBB = BBTerm->getSuccessor(0);
474 const BasicBlock *FBB = BBTerm->getSuccessor(1);
476 auto TProb = FuncInfo->BPI->getEdgeProbability(BB, TBB);
477 auto FProb = FuncInfo->BPI->getEdgeProbability(BB, FBB);
479 // We only want to handle cases which are easy to predict statically, e.g. a
480 // C++ throw statement, which is very likely not taken, or a call to a function
481 // that never returns, e.g. stdlib exit(). So we set Threshold to filter out
482 // the unwanted cases.
484 // Below is the LLVM branch weight table; we only want to handle cases 1 and 2.
486 // Case Taken:Nontaken Example
487 // 1. Unreachable 1048575:1 C++ throw, stdlib exit(),
488 // 2. Invoke-terminating 1:1048575
489 // 3. Coldblock 4:64 __builtin_expect
490 // 4. Loop Branch 124:4 For loop
491 // 5. PH/ZH/FPH 20:12
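// For example, the "Unreachable" weights (1048575:1) give a taken:not-taken
// ratio far above Threshold, so a hint is emitted, while the "Coldblock"
// __builtin_expect weights (4:64) give only a 16:1 ratio and are filtered out.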
492 const uint32_t Threshold = 10000;
494 if (std::max(TProb, FProb) / Threshold < std::min(TProb, FProb))
495 return PPC::BR_NO_HINT;
497 DEBUG(dbgs() << "Use branch hint for '" << FuncInfo->Fn->getName() << "::"
498 << BB->getName() << "'\n"
499 << " -> " << TBB->getName() << ": " << TProb << "\n"
500 << " -> " << FBB->getName() << ": " << FProb << "\n");
502 const BasicBlockSDNode *BBDN = cast<BasicBlockSDNode>(DestMBB);
504 // If the Dest BasicBlock is the False-BasicBlock (FBB), swap the branch probabilities,
505 // because we want 'TProb' to stand for the branch probability to the Dest BasicBlock.
506 if (BBDN->getBasicBlock()->getBasicBlock() != TBB)
507 std::swap(TProb, FProb);
509 return (TProb > FProb) ? PPC::BR_TAKEN_HINT : PPC::BR_NONTAKEN_HINT;
512 // isOpcWithIntImmediate - This method tests to see if the node is a specific
513 // opcode and that it has an immediate integer right operand.
514 // If so Imm will receive the 32-bit value.
515 static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
516 return N->getOpcode() == Opc
517 && isInt32Immediate(N->getOperand(1).getNode(), Imm);
520 void PPCDAGToDAGISel::selectFrameIndex(SDNode *SN, SDNode *N, unsigned Offset) {
521 SDLoc dl(SN);
522 int FI = cast<FrameIndexSDNode>(N)->getIndex();
523 SDValue TFI = CurDAG->getTargetFrameIndex(FI, N->getValueType(0));
524 unsigned Opc = N->getValueType(0) == MVT::i32 ? PPC::ADDI : PPC::ADDI8;
525 if (SN->hasOneUse())
526 CurDAG->SelectNodeTo(SN, Opc, N->getValueType(0), TFI,
527 getSmallIPtrImm(Offset, dl));
528 else
529 ReplaceNode(SN, CurDAG->getMachineNode(Opc, dl, N->getValueType(0), TFI,
530 getSmallIPtrImm(Offset, dl)));
533 bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
534 bool isShiftMask, unsigned &SH,
535 unsigned &MB, unsigned &ME) {
536 // Don't even go down this path for i64, since different logic will be
537 // necessary for rldicl/rldicr/rldimi.
538 if (N->getValueType(0) != MVT::i32)
539 return false;
541 unsigned Shift = 32;
542 unsigned Indeterminant = ~0; // bit mask marking indeterminant results
543 unsigned Opcode = N->getOpcode();
544 if (N->getNumOperands() != 2 ||
545 !isInt32Immediate(N->getOperand(1).getNode(), Shift) || (Shift > 31))
546 return false;
548 if (Opcode == ISD::SHL) {
549 // apply shift left to mask if it comes first
550 if (isShiftMask) Mask = Mask << Shift;
551 // determine which bits are made indeterminant by shift
552 Indeterminant = ~(0xFFFFFFFFu << Shift);
553 } else if (Opcode == ISD::SRL) {
554 // apply shift right to mask if it comes first
555 if (isShiftMask) Mask = Mask >> Shift;
556 // determine which bits are made indeterminant by shift
557 Indeterminant = ~(0xFFFFFFFFu >> Shift);
558 // adjust for the left rotate
559 Shift = 32 - Shift;
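// For example, for (srl x, 8) the indeterminate bits are 0xFF000000 and
// the equivalent left-rotate amount is 24; a mask of 0x00FFFFFF then
// gives SH = 24, MB = 8, ME = 31 (i.e. rlwinm x, 24, 8, 31).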
560 } else if (Opcode == ISD::ROTL) {
561 Indeterminant = 0;
562 } else {
563 return false;
566 // if the mask doesn't intersect any Indeterminant bits
567 if (Mask && !(Mask & Indeterminant)) {
568 SH = Shift & 31;
569 // make sure the mask is still a mask (wrap arounds may not be)
570 return isRunOfOnes(Mask, MB, ME);
572 return false;
575 /// Turn an or of two masked values into the rotate left word immediate then
576 /// mask insert (rlwimi) instruction.
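/// For example, (x & 0xFFFF0000) | (y & 0x0000FFFF) becomes
/// rlwimi x, y, 0, 16, 31, inserting the low halfword of y into x.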
577 bool PPCDAGToDAGISel::tryBitfieldInsert(SDNode *N) {
578 SDValue Op0 = N->getOperand(0);
579 SDValue Op1 = N->getOperand(1);
580 SDLoc dl(N);
582 KnownBits LKnown, RKnown;
583 CurDAG->computeKnownBits(Op0, LKnown);
584 CurDAG->computeKnownBits(Op1, RKnown);
586 unsigned TargetMask = LKnown.Zero.getZExtValue();
587 unsigned InsertMask = RKnown.Zero.getZExtValue();
589 if ((TargetMask | InsertMask) == 0xFFFFFFFF) {
590 unsigned Op0Opc = Op0.getOpcode();
591 unsigned Op1Opc = Op1.getOpcode();
592 unsigned Value, SH = 0;
593 TargetMask = ~TargetMask;
594 InsertMask = ~InsertMask;
596 // If the LHS has a foldable shift and the RHS does not, then swap it to the
597 // RHS so that we can fold the shift into the insert.
598 if (Op0Opc == ISD::AND && Op1Opc == ISD::AND) {
599 if (Op0.getOperand(0).getOpcode() == ISD::SHL ||
600 Op0.getOperand(0).getOpcode() == ISD::SRL) {
601 if (Op1.getOperand(0).getOpcode() != ISD::SHL &&
602 Op1.getOperand(0).getOpcode() != ISD::SRL) {
603 std::swap(Op0, Op1);
604 std::swap(Op0Opc, Op1Opc);
605 std::swap(TargetMask, InsertMask);
608 } else if (Op0Opc == ISD::SHL || Op0Opc == ISD::SRL) {
609 if (Op1Opc == ISD::AND && Op1.getOperand(0).getOpcode() != ISD::SHL &&
610 Op1.getOperand(0).getOpcode() != ISD::SRL) {
611 std::swap(Op0, Op1);
612 std::swap(Op0Opc, Op1Opc);
613 std::swap(TargetMask, InsertMask);
617 unsigned MB, ME;
618 if (isRunOfOnes(InsertMask, MB, ME)) {
619 if ((Op1Opc == ISD::SHL || Op1Opc == ISD::SRL) &&
620 isInt32Immediate(Op1.getOperand(1), Value)) {
621 Op1 = Op1.getOperand(0);
622 SH = (Op1Opc == ISD::SHL) ? Value : 32 - Value;
624 if (Op1Opc == ISD::AND) {
625 // The AND mask might not be a constant, and we need to make sure that
626 // if we're going to fold the masking with the insert, all bits not
627 // known to be zero in the mask are known to be one.
628 KnownBits MKnown;
629 CurDAG->computeKnownBits(Op1.getOperand(1), MKnown);
630 bool CanFoldMask = InsertMask == MKnown.One.getZExtValue();
632 unsigned SHOpc = Op1.getOperand(0).getOpcode();
633 if ((SHOpc == ISD::SHL || SHOpc == ISD::SRL) && CanFoldMask &&
634 isInt32Immediate(Op1.getOperand(0).getOperand(1), Value)) {
635 // Note that Value must be in range here (less than 32) because
636 // otherwise there would not be any bits set in InsertMask.
637 Op1 = Op1.getOperand(0).getOperand(0);
638 SH = (SHOpc == ISD::SHL) ? Value : 32 - Value;
642 SH &= 31;
643 SDValue Ops[] = { Op0, Op1, getI32Imm(SH, dl), getI32Imm(MB, dl),
644 getI32Imm(ME, dl) };
645 ReplaceNode(N, CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops));
646 return true;
649 return false;
652 // Predict the number of instructions that would be generated by calling
653 // selectI64Imm(N).
654 static unsigned selectI64ImmInstrCountDirect(int64_t Imm) {
655 // Assume no remaining bits.
656 unsigned Remainder = 0;
657 // Assume no shift required.
658 unsigned Shift = 0;
660 // If it can't be represented as a 32 bit value.
661 if (!isInt<32>(Imm)) {
662 Shift = countTrailingZeros<uint64_t>(Imm);
663 int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
665 // If the shifted value fits 32 bits.
666 if (isInt<32>(ImmSh)) {
667 // Go with the shifted value.
668 Imm = ImmSh;
669 } else {
670 // Still stuck with a 64 bit value.
671 Remainder = Imm;
672 Shift = 32;
673 Imm >>= 32;
677 // Intermediate operand.
678 unsigned Result = 0;
680 // Handle first 32 bits.
681 unsigned Lo = Imm & 0xFFFF;
683 // Simple value.
684 if (isInt<16>(Imm)) {
685 // Just the Lo bits.
686 ++Result;
687 } else if (Lo) {
688 // Handle the Hi bits and Lo bits.
689 Result += 2;
690 } else {
691 // Just the Hi bits.
692 ++Result;
695 // If no shift, we're done.
696 if (!Shift) return Result;
698 // If Hi word == Lo word,
699 // we can use rldimi to insert the Lo word into Hi word.
700 if ((unsigned)(Imm & 0xFFFFFFFF) == Remainder) {
701 ++Result;
702 return Result;
705 // Shift for next step if the upper 32-bits were not zero.
706 if (Imm)
707 ++Result;
709 // Add in the last bits as required.
710 if ((Remainder >> 16) & 0xFFFF)
711 ++Result;
712 if (Remainder & 0xFFFF)
713 ++Result;
715 return Result;
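// Rotate the 64-bit value Imm left by R bits (callers pass 0 < R < 64).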
718 static uint64_t Rot64(uint64_t Imm, unsigned R) {
719 return (Imm << R) | (Imm >> (64 - R));
722 static unsigned selectI64ImmInstrCount(int64_t Imm) {
723 unsigned Count = selectI64ImmInstrCountDirect(Imm);
725 // If the instruction count is 1 or 2, we do not need further analysis
726 // since rotate + load constant requires at least 2 instructions.
727 if (Count <= 2)
728 return Count;
730 for (unsigned r = 1; r < 63; ++r) {
731 uint64_t RImm = Rot64(Imm, r);
732 unsigned RCount = selectI64ImmInstrCountDirect(RImm) + 1;
733 Count = std::min(Count, RCount);
735 // See comments in selectI64Imm for an explanation of the logic below.
736 unsigned LS = findLastSet(RImm);
737 if (LS != r-1)
738 continue;
740 uint64_t OnesMask = -(int64_t) (UINT64_C(1) << (LS+1));
741 uint64_t RImmWithOnes = RImm | OnesMask;
743 RCount = selectI64ImmInstrCountDirect(RImmWithOnes) + 1;
744 Count = std::min(Count, RCount);
747 return Count;
750 // Select a 64-bit constant. For cost-modeling purposes, selectI64ImmInstrCount
751 // (above) needs to be kept in sync with this function.
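// For example, 0x1234567800000000 is built as LIS8 0x246 followed by
// ORI8 0x8ACF (giving 0x2468ACF) and then RLDICR with a shift of 35 and a
// mask end of 28 to move it into place, three instructions in total.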
752 static SDNode *selectI64ImmDirect(SelectionDAG *CurDAG, const SDLoc &dl,
753 int64_t Imm) {
754 // Assume no remaining bits.
755 unsigned Remainder = 0;
756 // Assume no shift required.
757 unsigned Shift = 0;
759 // If it can't be represented as a 32 bit value.
760 if (!isInt<32>(Imm)) {
761 Shift = countTrailingZeros<uint64_t>(Imm);
762 int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
764 // If the shifted value fits 32 bits.
765 if (isInt<32>(ImmSh)) {
766 // Go with the shifted value.
767 Imm = ImmSh;
768 } else {
769 // Still stuck with a 64 bit value.
770 Remainder = Imm;
771 Shift = 32;
772 Imm >>= 32;
776 // Intermediate operand.
777 SDNode *Result;
779 // Handle first 32 bits.
780 unsigned Lo = Imm & 0xFFFF;
781 unsigned Hi = (Imm >> 16) & 0xFFFF;
783 auto getI32Imm = [CurDAG, dl](unsigned Imm) {
784 return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
787 // Simple value.
788 if (isInt<16>(Imm)) {
789 uint64_t SextImm = SignExtend64(Lo, 16);
790 SDValue SDImm = CurDAG->getTargetConstant(SextImm, dl, MVT::i64);
791 // Just the Lo bits.
792 Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64, SDImm);
793 } else if (Lo) {
794 // Handle the Hi bits.
795 unsigned OpC = Hi ? PPC::LIS8 : PPC::LI8;
796 Result = CurDAG->getMachineNode(OpC, dl, MVT::i64, getI32Imm(Hi));
797 // And Lo bits.
798 Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64,
799 SDValue(Result, 0), getI32Imm(Lo));
800 } else {
801 // Just the Hi bits.
802 Result = CurDAG->getMachineNode(PPC::LIS8, dl, MVT::i64, getI32Imm(Hi));
805 // If no shift, we're done.
806 if (!Shift) return Result;
808 // If Hi word == Lo word,
809 // we can use rldimi to insert the Lo word into Hi word.
810 if ((unsigned)(Imm & 0xFFFFFFFF) == Remainder) {
811 SDValue Ops[] =
812 { SDValue(Result, 0), SDValue(Result, 0), getI32Imm(Shift), getI32Imm(0)};
813 return CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops);
816 // Shift for next step if the upper 32-bits were not zero.
817 if (Imm) {
818 Result = CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64,
819 SDValue(Result, 0),
820 getI32Imm(Shift),
821 getI32Imm(63 - Shift));
824 // Add in the last bits as required.
825 if ((Hi = (Remainder >> 16) & 0xFFFF)) {
826 Result = CurDAG->getMachineNode(PPC::ORIS8, dl, MVT::i64,
827 SDValue(Result, 0), getI32Imm(Hi));
829 if ((Lo = Remainder & 0xFFFF)) {
830 Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64,
831 SDValue(Result, 0), getI32Imm(Lo));
834 return Result;
837 static SDNode *selectI64Imm(SelectionDAG *CurDAG, const SDLoc &dl,
838 int64_t Imm) {
839 unsigned Count = selectI64ImmInstrCountDirect(Imm);
841 // If the instruction count is 1 or 2, we do not need further analysis
842 // since rotate + load constant requires at least 2 instructions.
843 if (Count <= 2)
844 return selectI64ImmDirect(CurDAG, dl, Imm);
846 unsigned RMin = 0;
848 int64_t MatImm;
849 unsigned MaskEnd;
851 for (unsigned r = 1; r < 63; ++r) {
852 uint64_t RImm = Rot64(Imm, r);
853 unsigned RCount = selectI64ImmInstrCountDirect(RImm) + 1;
854 if (RCount < Count) {
855 Count = RCount;
856 RMin = r;
857 MatImm = RImm;
858 MaskEnd = 63;
861 // If the immediate to generate has many trailing zeros, it might be
862 // worthwhile to generate a rotated value with too many leading ones
863 // (because that's free with li/lis's sign-extension semantics), and then
864 // mask them off after rotation.
866 unsigned LS = findLastSet(RImm);
867 // We're adding (63-LS) higher-order ones, and we expect to mask them off
868 // after performing the inverse rotation by (64-r). So we need that:
869 // 63-LS == 64-r => LS == r-1
870 if (LS != r-1)
871 continue;
873 uint64_t OnesMask = -(int64_t) (UINT64_C(1) << (LS+1));
874 uint64_t RImmWithOnes = RImm | OnesMask;
876 RCount = selectI64ImmInstrCountDirect(RImmWithOnes) + 1;
877 if (RCount < Count) {
878 Count = RCount;
879 RMin = r;
880 MatImm = RImmWithOnes;
881 MaskEnd = LS;
885 if (!RMin)
886 return selectI64ImmDirect(CurDAG, dl, Imm);
888 auto getI32Imm = [CurDAG, dl](unsigned Imm) {
889 return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
892 SDValue Val = SDValue(selectI64ImmDirect(CurDAG, dl, MatImm), 0);
893 return CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64, Val,
894 getI32Imm(64 - RMin), getI32Imm(MaskEnd));
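// allUsesTruncate - If every user of N only consumes a truncated portion of
// the value (via ISD::TRUNCATE or a narrowing store whose stored value is N),
// return the widest number of bits any such user consumes; return 0 if some
// user may need the full 64 bits.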
897 static unsigned allUsesTruncate(SelectionDAG *CurDAG, SDNode *N) {
898 unsigned MaxTruncation = 0;
899 // Cannot use range-based for loop here as we need the actual use (i.e. we
900 // need the operand number corresponding to the use). A range-based for
901 // will unbox the use and provide an SDNode*.
902 for (SDNode::use_iterator Use = N->use_begin(), UseEnd = N->use_end();
903 Use != UseEnd; ++Use) {
904 unsigned Opc =
905 Use->isMachineOpcode() ? Use->getMachineOpcode() : Use->getOpcode();
906 switch (Opc) {
907 default: return 0;
908 case ISD::TRUNCATE:
909 if (Use->isMachineOpcode())
910 return 0;
911 MaxTruncation =
912 std::max(MaxTruncation, Use->getValueType(0).getSizeInBits());
913 continue;
914 case ISD::STORE: {
915 if (Use->isMachineOpcode())
916 return 0;
917 StoreSDNode *STN = cast<StoreSDNode>(*Use);
918 unsigned MemVTSize = STN->getMemoryVT().getSizeInBits();
919 if (MemVTSize == 64 || Use.getOperandNo() != 0)
920 return 0;
921 MaxTruncation = std::max(MaxTruncation, MemVTSize);
922 continue;
924 case PPC::STW8:
925 case PPC::STWX8:
926 case PPC::STWU8:
927 case PPC::STWUX8:
928 if (Use.getOperandNo() != 0)
929 return 0;
930 MaxTruncation = std::max(MaxTruncation, 32u);
931 continue;
932 case PPC::STH8:
933 case PPC::STHX8:
934 case PPC::STHU8:
935 case PPC::STHUX8:
936 if (Use.getOperandNo() != 0)
937 return 0;
938 MaxTruncation = std::max(MaxTruncation, 16u);
939 continue;
940 case PPC::STB8:
941 case PPC::STBX8:
942 case PPC::STBU8:
943 case PPC::STBUX8:
944 if (Use.getOperandNo() != 0)
945 return 0;
946 MaxTruncation = std::max(MaxTruncation, 8u);
947 continue;
950 return MaxTruncation;
953 // Select a 64-bit constant.
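// If all users only look at a truncated portion of the constant, we may
// sign-extend from that width: e.g. 0xFFFF with only 16-bit-truncating users
// becomes a single LI8 of -1 instead of LI8 0 followed by ORI8 0xFFFF.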
954 static SDNode *selectI64Imm(SelectionDAG *CurDAG, SDNode *N) {
955 SDLoc dl(N);
957 // Get 64 bit value.
958 int64_t Imm = cast<ConstantSDNode>(N)->getZExtValue();
959 if (unsigned MinSize = allUsesTruncate(CurDAG, N)) {
960 uint64_t SextImm = SignExtend64(Imm, MinSize);
961 SDValue SDImm = CurDAG->getTargetConstant(SextImm, dl, MVT::i64);
962 if (isInt<16>(SextImm))
963 return CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64, SDImm);
965 return selectI64Imm(CurDAG, dl, Imm);
968 namespace {
970 class BitPermutationSelector {
971 struct ValueBit {
972 SDValue V;
974 // The bit number in the value, using a convention where bit 0 is the
975 // lowest-order bit.
976 unsigned Idx;
978 enum Kind {
979 ConstZero,
980 Variable
981 } K;
983 ValueBit(SDValue V, unsigned I, Kind K = Variable)
984 : V(V), Idx(I), K(K) {}
985 ValueBit(Kind K = Variable)
986 : V(SDValue(nullptr, 0)), Idx(UINT32_MAX), K(K) {}
988 bool isZero() const {
989 return K == ConstZero;
992 bool hasValue() const {
993 return K == Variable;
996 SDValue getValue() const {
997 assert(hasValue() && "Cannot get the value of a constant bit");
998 return V;
1001 unsigned getValueBitIndex() const {
1002 assert(hasValue() && "Cannot get the value bit index of a constant bit");
1003 return Idx;
1007 // A bit group has the same underlying value and the same rotate factor.
1008 struct BitGroup {
1009 SDValue V;
1010 unsigned RLAmt;
1011 unsigned StartIdx, EndIdx;
1013 // This rotation amount assumes that the lower 32 bits of the quantity are
1014 // replicated in the high 32 bits by the rotation operator (which is done
1015 // by rlwinm and friends in 64-bit mode).
1016 bool Repl32;
1017 // Did converting to Repl32 == true change the rotation factor? If it did,
1018 // it decreased it by 32.
1019 bool Repl32CR;
1020 // Was this group coalesced after setting Repl32 to true?
1021 bool Repl32Coalesced;
1023 BitGroup(SDValue V, unsigned R, unsigned S, unsigned E)
1024 : V(V), RLAmt(R), StartIdx(S), EndIdx(E), Repl32(false), Repl32CR(false),
1025 Repl32Coalesced(false) {
1026 DEBUG(dbgs() << "\tbit group for " << V.getNode() << " RLAmt = " << R <<
1027 " [" << S << ", " << E << "]\n");
1031 // Information on each (Value, RLAmt) pair (like the number of groups
1032 // associated with each) used to choose the lowering method.
1033 struct ValueRotInfo {
1034 SDValue V;
1035 unsigned RLAmt = std::numeric_limits<unsigned>::max();
1036 unsigned NumGroups = 0;
1037 unsigned FirstGroupStartIdx = std::numeric_limits<unsigned>::max();
1038 bool Repl32 = false;
1040 ValueRotInfo() = default;
1042 // For sorting (in reverse order) by NumGroups, and then by
1043 // FirstGroupStartIdx.
1044 bool operator < (const ValueRotInfo &Other) const {
1045 // We need to sort so that the non-Repl32 come first because, when we're
1046 // doing masking, the Repl32 bit groups might be subsumed into the 64-bit
1047 // masking operation.
1048 if (Repl32 < Other.Repl32)
1049 return true;
1050 else if (Repl32 > Other.Repl32)
1051 return false;
1052 else if (NumGroups > Other.NumGroups)
1053 return true;
1054 else if (NumGroups < Other.NumGroups)
1055 return false;
1056 else if (FirstGroupStartIdx < Other.FirstGroupStartIdx)
1057 return true;
1058 return false;
1062 using ValueBitsMemoizedValue = std::pair<bool, SmallVector<ValueBit, 64>>;
1063 using ValueBitsMemoizer =
1064 DenseMap<SDValue, std::unique_ptr<ValueBitsMemoizedValue>>;
1065 ValueBitsMemoizer Memoizer;
1067 // Return a pair of bool and a SmallVector pointer to a memoization entry.
1068 // The bool is true if something interesting was deduced, and false if we're
1069 // providing only a generic representation of V (or something else likewise
1070 // uninteresting for instruction selection) through the SmallVector.
1071 std::pair<bool, SmallVector<ValueBit, 64> *> getValueBits(SDValue V,
1072 unsigned NumBits) {
1073 auto &ValueEntry = Memoizer[V];
1074 if (ValueEntry)
1075 return std::make_pair(ValueEntry->first, &ValueEntry->second);
1076 ValueEntry.reset(new ValueBitsMemoizedValue());
1077 bool &Interesting = ValueEntry->first;
1078 SmallVector<ValueBit, 64> &Bits = ValueEntry->second;
1079 Bits.resize(NumBits);
1081 switch (V.getOpcode()) {
1082 default: break;
1083 case ISD::ROTL:
1084 if (isa<ConstantSDNode>(V.getOperand(1))) {
1085 unsigned RotAmt = V.getConstantOperandVal(1);
1087 const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second;
1089 for (unsigned i = 0; i < NumBits; ++i)
1090 Bits[i] = LHSBits[i < RotAmt ? i + (NumBits - RotAmt) : i - RotAmt];
1092 return std::make_pair(Interesting = true, &Bits);
1094 break;
1095 case ISD::SHL:
1096 if (isa<ConstantSDNode>(V.getOperand(1))) {
1097 unsigned ShiftAmt = V.getConstantOperandVal(1);
1099 const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second;
1101 for (unsigned i = ShiftAmt; i < NumBits; ++i)
1102 Bits[i] = LHSBits[i - ShiftAmt];
1104 for (unsigned i = 0; i < ShiftAmt; ++i)
1105 Bits[i] = ValueBit(ValueBit::ConstZero);
1107 return std::make_pair(Interesting = true, &Bits);
1109 break;
1110 case ISD::SRL:
1111 if (isa<ConstantSDNode>(V.getOperand(1))) {
1112 unsigned ShiftAmt = V.getConstantOperandVal(1);
1114 const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second;
1116 for (unsigned i = 0; i < NumBits - ShiftAmt; ++i)
1117 Bits[i] = LHSBits[i + ShiftAmt];
1119 for (unsigned i = NumBits - ShiftAmt; i < NumBits; ++i)
1120 Bits[i] = ValueBit(ValueBit::ConstZero);
1122 return std::make_pair(Interesting = true, &Bits);
1124 break;
1125 case ISD::AND:
1126 if (isa<ConstantSDNode>(V.getOperand(1))) {
1127 uint64_t Mask = V.getConstantOperandVal(1);
1129 const SmallVector<ValueBit, 64> *LHSBits;
1130 // Mark this as interesting, only if the LHS was also interesting. This
1131 // prevents the overall procedure from matching a single immediate 'and'
1132 // (which is non-optimal because such an and might be folded with other
1133 // things if we don't select it here).
1134 std::tie(Interesting, LHSBits) = getValueBits(V.getOperand(0), NumBits);
1136 for (unsigned i = 0; i < NumBits; ++i)
1137 if (((Mask >> i) & 1) == 1)
1138 Bits[i] = (*LHSBits)[i];
1139 else
1140 Bits[i] = ValueBit(ValueBit::ConstZero);
1142 return std::make_pair(Interesting, &Bits);
1144 break;
1145 case ISD::OR: {
1146 const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second;
1147 const auto &RHSBits = *getValueBits(V.getOperand(1), NumBits).second;
1149 bool AllDisjoint = true;
1150 for (unsigned i = 0; i < NumBits; ++i)
1151 if (LHSBits[i].isZero())
1152 Bits[i] = RHSBits[i];
1153 else if (RHSBits[i].isZero())
1154 Bits[i] = LHSBits[i];
1155 else {
1156 AllDisjoint = false;
1157 break;
1160 if (!AllDisjoint)
1161 break;
1163 return std::make_pair(Interesting = true, &Bits);
1165 case ISD::ZERO_EXTEND: {
1166 // We support only the case with zero extension from i32 to i64 so far.
1167 if (V.getValueType() != MVT::i64 ||
1168 V.getOperand(0).getValueType() != MVT::i32)
1169 break;
1171 const SmallVector<ValueBit, 64> *LHSBits;
1172 const unsigned NumOperandBits = 32;
1173 std::tie(Interesting, LHSBits) = getValueBits(V.getOperand(0),
1174 NumOperandBits);
1176 for (unsigned i = 0; i < NumOperandBits; ++i)
1177 Bits[i] = (*LHSBits)[i];
1179 for (unsigned i = NumOperandBits; i < NumBits; ++i)
1180 Bits[i] = ValueBit(ValueBit::ConstZero);
1182 return std::make_pair(Interesting, &Bits);
1186 for (unsigned i = 0; i < NumBits; ++i)
1187 Bits[i] = ValueBit(V, i);
1189 return std::make_pair(Interesting = false, &Bits);
1192 // For each value (except the constant ones), compute the left-rotate amount
1193 // to get it from its original to final position.
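// For example, in a 32-bit permutation, if bit 5 of the result is bit 3 of V
// then RLAmt is 5 - 3 = 2; if bit 5 of the result is bit 7 of V then
// RLAmt is 32 - (7 - 5) = 30.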
1194 void computeRotationAmounts() {
1195 HasZeros = false;
1196 RLAmt.resize(Bits.size());
1197 for (unsigned i = 0; i < Bits.size(); ++i)
1198 if (Bits[i].hasValue()) {
1199 unsigned VBI = Bits[i].getValueBitIndex();
1200 if (i >= VBI)
1201 RLAmt[i] = i - VBI;
1202 else
1203 RLAmt[i] = Bits.size() - (VBI - i);
1204 } else if (Bits[i].isZero()) {
1205 HasZeros = true;
1206 RLAmt[i] = UINT32_MAX;
1207 } else {
1208 llvm_unreachable("Unknown value bit type");
1212 // Collect groups of consecutive bits with the same underlying value and
1213 // rotation factor. If we're doing late masking, we ignore zeros, otherwise
1214 // they break up groups.
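// For example, for (x << 8) | (y >> 24) on i32, bits 8-31 come from x and
// bits 0-7 come from y, both with a left-rotate amount of 8, giving the two
// bit groups (x, RLAmt = 8, [8, 31]) and (y, RLAmt = 8, [0, 7]).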
1215 void collectBitGroups(bool LateMask) {
1216 BitGroups.clear();
1218 unsigned LastRLAmt = RLAmt[0];
1219 SDValue LastValue = Bits[0].hasValue() ? Bits[0].getValue() : SDValue();
1220 unsigned LastGroupStartIdx = 0;
1221 for (unsigned i = 1; i < Bits.size(); ++i) {
1222 unsigned ThisRLAmt = RLAmt[i];
1223 SDValue ThisValue = Bits[i].hasValue() ? Bits[i].getValue() : SDValue();
1224 if (LateMask && !ThisValue) {
1225 ThisValue = LastValue;
1226 ThisRLAmt = LastRLAmt;
1227 // If we're doing late masking, then the first bit group always starts
1228 // at zero (even if the first bits were zero).
1229 if (BitGroups.empty())
1230 LastGroupStartIdx = 0;
1233 // If this bit has the same underlying value and the same rotate factor as
1234 // the last one, then they're part of the same group.
1235 if (ThisRLAmt == LastRLAmt && ThisValue == LastValue)
1236 continue;
1238 if (LastValue.getNode())
1239 BitGroups.push_back(BitGroup(LastValue, LastRLAmt, LastGroupStartIdx,
1240 i-1));
1241 LastRLAmt = ThisRLAmt;
1242 LastValue = ThisValue;
1243 LastGroupStartIdx = i;
1245 if (LastValue.getNode())
1246 BitGroups.push_back(BitGroup(LastValue, LastRLAmt, LastGroupStartIdx,
1247 Bits.size()-1));
1249 if (BitGroups.empty())
1250 return;
1252 // We might be able to combine the first and last groups.
1253 if (BitGroups.size() > 1) {
1254 // If the first and last groups are the same, then remove the first group
1255 // in favor of the last group, making the ending index of the last group
1256 // equal to the ending index of the to-be-removed first group.
1257 if (BitGroups[0].StartIdx == 0 &&
1258 BitGroups[BitGroups.size()-1].EndIdx == Bits.size()-1 &&
1259 BitGroups[0].V == BitGroups[BitGroups.size()-1].V &&
1260 BitGroups[0].RLAmt == BitGroups[BitGroups.size()-1].RLAmt) {
1261 DEBUG(dbgs() << "\tcombining final bit group with initial one\n");
1262 BitGroups[BitGroups.size()-1].EndIdx = BitGroups[0].EndIdx;
1263 BitGroups.erase(BitGroups.begin());
1268 // Take all (SDValue, RLAmt) pairs and sort them by the number of groups
1269 // associated with each. If there is a degeneracy, pick the one that occurs
1270 // first (in the final value).
1271 void collectValueRotInfo() {
1272 ValueRots.clear();
1274 for (auto &BG : BitGroups) {
1275 unsigned RLAmtKey = BG.RLAmt + (BG.Repl32 ? 64 : 0);
1276 ValueRotInfo &VRI = ValueRots[std::make_pair(BG.V, RLAmtKey)];
1277 VRI.V = BG.V;
1278 VRI.RLAmt = BG.RLAmt;
1279 VRI.Repl32 = BG.Repl32;
1280 VRI.NumGroups += 1;
1281 VRI.FirstGroupStartIdx = std::min(VRI.FirstGroupStartIdx, BG.StartIdx);
1284 // Now that we've collected the various ValueRotInfo instances, we need to
1285 // sort them.
1286 ValueRotsVec.clear();
1287 for (auto &I : ValueRots) {
1288 ValueRotsVec.push_back(I.second);
1290 std::sort(ValueRotsVec.begin(), ValueRotsVec.end());
1293 // In 64-bit mode, rlwinm and friends have a rotation operator that
1294 // replicates the low-order 32 bits into the high-order 32-bits. The mask
1295 // indices of these instructions can only be in the lower 32 bits, so they
1296 // can only represent some 64-bit bit groups. However, when they can be used,
1297 // the 32-bit replication can be used to represent, as a single bit group,
1298 // otherwise separate bit groups. We'll convert to replicated-32-bit bit
1299 // groups when possible.
1301 void assignRepl32BitGroups() {
1302 // If we have bits like this:
1304 // Indices: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
1305 // V bits: ... 7 6 5 4 3 2 1 0 31 30 29 28 27 26 25 24
1306 // Groups: | RLAmt = 8 | RLAmt = 40 |
1308 // But, making use of a 32-bit operation that replicates the low-order 32
1309 // bits into the high-order 32 bits, this can be one bit group with a RLAmt
1310 // of 8.
1312 auto IsAllLow32 = [this](BitGroup & BG) {
1313 if (BG.StartIdx <= BG.EndIdx) {
1314 for (unsigned i = BG.StartIdx; i <= BG.EndIdx; ++i) {
1315 if (!Bits[i].hasValue())
1316 continue;
1317 if (Bits[i].getValueBitIndex() >= 32)
1318 return false;
1320 } else {
1321 for (unsigned i = BG.StartIdx; i < Bits.size(); ++i) {
1322 if (!Bits[i].hasValue())
1323 continue;
1324 if (Bits[i].getValueBitIndex() >= 32)
1325 return false;
1327 for (unsigned i = 0; i <= BG.EndIdx; ++i) {
1328 if (!Bits[i].hasValue())
1329 continue;
1330 if (Bits[i].getValueBitIndex() >= 32)
1331 return false;
1335 return true;
1338 for (auto &BG : BitGroups) {
1339 if (BG.StartIdx < 32 && BG.EndIdx < 32) {
1340 if (IsAllLow32(BG)) {
1341 if (BG.RLAmt >= 32) {
1342 BG.RLAmt -= 32;
1343 BG.Repl32CR = true;
1346 BG.Repl32 = true;
1348 DEBUG(dbgs() << "\t32-bit replicated bit group for " <<
1349 BG.V.getNode() << " RLAmt = " << BG.RLAmt <<
1350 " [" << BG.StartIdx << ", " << BG.EndIdx << "]\n");
1355 // Now walk through the bit groups, consolidating where possible.
1356 for (auto I = BitGroups.begin(); I != BitGroups.end();) {
1357 // We might want to remove this bit group by merging it with the previous
1358 // group (which might be the ending group).
1359 auto IP = (I == BitGroups.begin()) ?
1360 std::prev(BitGroups.end()) : std::prev(I);
1361 if (I->Repl32 && IP->Repl32 && I->V == IP->V && I->RLAmt == IP->RLAmt &&
1362 I->StartIdx == (IP->EndIdx + 1) % 64 && I != IP) {
1364 DEBUG(dbgs() << "\tcombining 32-bit replicated bit group for " <<
1365 I->V.getNode() << " RLAmt = " << I->RLAmt <<
1366 " [" << I->StartIdx << ", " << I->EndIdx <<
1367 "] with group with range [" <<
1368 IP->StartIdx << ", " << IP->EndIdx << "]\n");
1370 IP->EndIdx = I->EndIdx;
1371 IP->Repl32CR = IP->Repl32CR || I->Repl32CR;
1372 IP->Repl32Coalesced = true;
1373 I = BitGroups.erase(I);
1374 continue;
1375 } else {
1376 // There is a special case worth handling: If there is a single group
1377 // covering the entire upper 32 bits, and it can be merged with both
1378 // the next and previous groups (which might be the same group), then
1379 // do so. If it is the same group (so there will be only one group in
1380 // total), then we need to reverse the order of the range so that it
1381 // covers the entire 64 bits.
1382 if (I->StartIdx == 32 && I->EndIdx == 63) {
1383 assert(std::next(I) == BitGroups.end() &&
1384 "bit group ends at index 63 but there is another?");
1385 auto IN = BitGroups.begin();
1387 if (IP->Repl32 && IN->Repl32 && I->V == IP->V && I->V == IN->V &&
1388 (I->RLAmt % 32) == IP->RLAmt && (I->RLAmt % 32) == IN->RLAmt &&
1389 IP->EndIdx == 31 && IN->StartIdx == 0 && I != IP &&
1390 IsAllLow32(*I)) {
1392 DEBUG(dbgs() << "\tcombining bit group for " <<
1393 I->V.getNode() << " RLAmt = " << I->RLAmt <<
1394 " [" << I->StartIdx << ", " << I->EndIdx <<
1395 "] with 32-bit replicated groups with ranges [" <<
1396 IP->StartIdx << ", " << IP->EndIdx << "] and [" <<
1397 IN->StartIdx << ", " << IN->EndIdx << "]\n");
1399 if (IP == IN) {
1400 // There is only one other group; change it to cover the whole
1401 // range (backward, so that it can still be Repl32 but cover the
1402 // whole 64-bit range).
1403 IP->StartIdx = 31;
1404 IP->EndIdx = 30;
1405 IP->Repl32CR = IP->Repl32CR || I->RLAmt >= 32;
1406 IP->Repl32Coalesced = true;
1407 I = BitGroups.erase(I);
1408 } else {
1409 // There are two separate groups, one before this group and one
1410 // after us (at the beginning). We're going to remove this group,
1411 // but also the group at the very beginning.
1412 IP->EndIdx = IN->EndIdx;
1413 IP->Repl32CR = IP->Repl32CR || IN->Repl32CR || I->RLAmt >= 32;
1414 IP->Repl32Coalesced = true;
1415 I = BitGroups.erase(I);
1416 BitGroups.erase(BitGroups.begin());
1419 // This must be the last group in the vector (and we might have
1420 // just invalidated the iterator above), so break here.
1421 break;
1426 ++I;
1430 SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
1431 return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
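// Return a mask with a one in every bit position that carries a value (the
// complement of the positions known to be constant zero); ANDing the
// assembled result with this mask implements the late masking of zeros.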
1434 uint64_t getZerosMask() {
1435 uint64_t Mask = 0;
1436 for (unsigned i = 0; i < Bits.size(); ++i) {
1437 if (Bits[i].hasValue())
1438 continue;
1439 Mask |= (UINT64_C(1) << i);
1442 return ~Mask;
1445 // This method extends an input value to 64 bits if the input is a 32-bit integer.
1446 // While selecting instructions in BitPermutationSelector in 64-bit mode,
1447 // an input value can be a 32-bit integer if a ZERO_EXTEND node is included.
1448 // In such a case, we extend it to 64 bits to be consistent with the other values.
1449 SDValue ExtendToInt64(SDValue V, const SDLoc &dl) {
1450 if (V.getValueSizeInBits() == 64)
1451 return V;
1453 assert(V.getValueSizeInBits() == 32);
1454 SDValue SubRegIdx = CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32);
1455 SDValue ImDef = SDValue(CurDAG->getMachineNode(PPC::IMPLICIT_DEF, dl,
1456 MVT::i64), 0);
1457 SDValue ExtVal = SDValue(CurDAG->getMachineNode(PPC::INSERT_SUBREG, dl,
1458 MVT::i64, ImDef, V,
1459 SubRegIdx), 0);
1460 return ExtVal;
1463 // Depending on the number of groups for a particular value, it might be
1464 // better to rotate, mask explicitly (using andi/andis), and then or the
1465 // result. Select this part of the result first.
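// For example, a mask of 0x00FF00FF needs andi. 0x00FF, andis. 0x00FF and an
// or to combine them, whereas a mask of 0x0000FF00 needs only a single andi.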
1466 void SelectAndParts32(const SDLoc &dl, SDValue &Res, unsigned *InstCnt) {
1467 if (BPermRewriterNoMasking)
1468 return;
1470 for (ValueRotInfo &VRI : ValueRotsVec) {
1471 unsigned Mask = 0;
1472 for (unsigned i = 0; i < Bits.size(); ++i) {
1473 if (!Bits[i].hasValue() || Bits[i].getValue() != VRI.V)
1474 continue;
1475 if (RLAmt[i] != VRI.RLAmt)
1476 continue;
1477 Mask |= (1u << i);
1480 // Compute the masks for andi/andis that would be necessary.
1481 unsigned ANDIMask = (Mask & UINT16_MAX), ANDISMask = Mask >> 16;
1482 assert((ANDIMask != 0 || ANDISMask != 0) &&
1483 "No set bits in mask for value bit groups");
1484 bool NeedsRotate = VRI.RLAmt != 0;
1486 // We're trying to minimize the number of instructions. If we have one
1487 // group, using one of andi/andis can break even. If we have three
1488 // groups, we can use both andi and andis and break even (to use both
1489 // andi and andis we also need to or the results together). We need four
1490 // groups if we also need to rotate. To use andi/andis we need to do more
1491 // than break even because rotate-and-mask instructions tend to be easier
1492 // to schedule.
1494 // FIXME: We've biased here against using andi/andis, which is right for
1495 // POWER cores, but not optimal everywhere. For example, on the A2,
1496 // andi/andis have single-cycle latency whereas the rotate-and-mask
1497 // instructions take two cycles, and it would be better to bias toward
1498 // andi/andis in break-even cases.
1500 unsigned NumAndInsts = (unsigned) NeedsRotate +
1501 (unsigned) (ANDIMask != 0) +
1502 (unsigned) (ANDISMask != 0) +
1503 (unsigned) (ANDIMask != 0 && ANDISMask != 0) +
1504 (unsigned) (bool) Res;
1506 DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode() <<
1507 " RL: " << VRI.RLAmt << ":" <<
1508 "\n\t\t\tisel using masking: " << NumAndInsts <<
1509 " using rotates: " << VRI.NumGroups << "\n");
1511 if (NumAndInsts >= VRI.NumGroups)
1512 continue;
1514 DEBUG(dbgs() << "\t\t\t\tusing masking\n");
1516 if (InstCnt) *InstCnt += NumAndInsts;
1518 SDValue VRot;
1519 if (VRI.RLAmt) {
1520 SDValue Ops[] =
1521 { VRI.V, getI32Imm(VRI.RLAmt, dl), getI32Imm(0, dl),
1522 getI32Imm(31, dl) };
1523 VRot = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32,
1524 Ops), 0);
1525 } else {
1526 VRot = VRI.V;
1529 SDValue ANDIVal, ANDISVal;
1530 if (ANDIMask != 0)
1531 ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo, dl, MVT::i32,
1532 VRot, getI32Imm(ANDIMask, dl)), 0);
1533 if (ANDISMask != 0)
1534 ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo, dl, MVT::i32,
1535 VRot, getI32Imm(ANDISMask, dl)), 0);
1537 SDValue TotalVal;
1538 if (!ANDIVal)
1539 TotalVal = ANDISVal;
1540 else if (!ANDISVal)
1541 TotalVal = ANDIVal;
1542 else
1543 TotalVal = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32,
1544 ANDIVal, ANDISVal), 0);
1546 if (!Res)
1547 Res = TotalVal;
1548 else
1549 Res = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32,
1550 Res, TotalVal), 0);
1552 // Now, remove all groups with this underlying value and rotation
1553 // factor.
1554 eraseMatchingBitGroups([VRI](const BitGroup &BG) {
1555 return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt;
1560 // Instruction selection for the 32-bit case.
1561 SDNode *Select32(SDNode *N, bool LateMask, unsigned *InstCnt) {
1562 SDLoc dl(N);
1563 SDValue Res;
1565 if (InstCnt) *InstCnt = 0;
1567 // Take care of cases that should use andi/andis first.
1568 SelectAndParts32(dl, Res, InstCnt);
1570 // If we've not yet selected a 'starting' instruction, and we have no zeros
1571 // to fill in, select the (Value, RLAmt) with the highest priority (largest
1572 // number of groups), and start with this rotated value.
1573 if ((!HasZeros || LateMask) && !Res) {
1574 ValueRotInfo &VRI = ValueRotsVec[0];
1575 if (VRI.RLAmt) {
1576 if (InstCnt) *InstCnt += 1;
1577 SDValue Ops[] =
1578 { VRI.V, getI32Imm(VRI.RLAmt, dl), getI32Imm(0, dl),
1579 getI32Imm(31, dl) };
1580 Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops),
1582 } else {
1583 Res = VRI.V;
1586 // Now, remove all groups with this underlying value and rotation factor.
1587 eraseMatchingBitGroups([VRI](const BitGroup &BG) {
1588 return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt;
1592 if (InstCnt) *InstCnt += BitGroups.size();
1594 // Insert the other groups (one at a time).
1595 for (auto &BG : BitGroups) {
1596 if (!Res) {
1597 SDValue Ops[] =
1598 { BG.V, getI32Imm(BG.RLAmt, dl),
1599 getI32Imm(Bits.size() - BG.EndIdx - 1, dl),
1600 getI32Imm(Bits.size() - BG.StartIdx - 1, dl) };
1601 Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
1602 } else {
1603 SDValue Ops[] =
1604 { Res, BG.V, getI32Imm(BG.RLAmt, dl),
1605 getI32Imm(Bits.size() - BG.EndIdx - 1, dl),
1606 getI32Imm(Bits.size() - BG.StartIdx - 1, dl) };
1607 Res = SDValue(CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops), 0);
1611 if (LateMask) {
1612 unsigned Mask = (unsigned) getZerosMask();
1614 unsigned ANDIMask = (Mask & UINT16_MAX), ANDISMask = Mask >> 16;
1615 assert((ANDIMask != 0 || ANDISMask != 0) &&
1616 "No set bits in zeros mask?");
1618 if (InstCnt) *InstCnt += (unsigned) (ANDIMask != 0) +
1619 (unsigned) (ANDISMask != 0) +
1620 (unsigned) (ANDIMask != 0 && ANDISMask != 0);
1622 SDValue ANDIVal, ANDISVal;
1623 if (ANDIMask != 0)
1624 ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo, dl, MVT::i32,
1625 Res, getI32Imm(ANDIMask, dl)), 0);
1626 if (ANDISMask != 0)
1627 ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo, dl, MVT::i32,
1628 Res, getI32Imm(ANDISMask, dl)), 0);
1630 if (!ANDIVal)
1631 Res = ANDISVal;
1632 else if (!ANDISVal)
1633 Res = ANDIVal;
1634 else
1635 Res = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32,
1636 ANDIVal, ANDISVal), 0);
1639 return Res.getNode();
1642 unsigned SelectRotMask64Count(unsigned RLAmt, bool Repl32,
1643 unsigned MaskStart, unsigned MaskEnd,
1644 bool IsIns) {
1645 // In the notation used by the instructions, 'start' and 'end' are reversed
1646 // because bits are counted from high to low order.
1647 unsigned InstMaskStart = 64 - MaskEnd - 1,
1648 InstMaskEnd = 64 - MaskStart - 1;
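// For example (illustrative): a mask covering the low 16 bits, MaskStart = 0
// and MaskEnd = 15, becomes InstMaskStart = 48 and InstMaskEnd = 63 in the
// MSB-0 bit numbering used by the rotate-and-mask instructions.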
1650 if (Repl32)
1651 return 1;
1653 if ((!IsIns && (InstMaskEnd == 63 || InstMaskStart == 0)) ||
1654 InstMaskEnd == 63 - RLAmt)
1655 return 1;
1657 return 2;
1660 // For 64-bit values, not all combinations of rotates and masks are
1661 // available. Produce one if it is available.
1662 SDValue SelectRotMask64(SDValue V, const SDLoc &dl, unsigned RLAmt,
1663 bool Repl32, unsigned MaskStart, unsigned MaskEnd,
1664 unsigned *InstCnt = nullptr) {
1665 // In the notation used by the instructions, 'start' and 'end' are reversed
1666 // because bits are counted from high to low order.
1667 unsigned InstMaskStart = 64 - MaskEnd - 1,
1668 InstMaskEnd = 64 - MaskStart - 1;
1670 if (InstCnt) *InstCnt += 1;
1672 if (Repl32) {
1673 // This rotation amount assumes that the lower 32 bits of the quantity
1674 // are replicated in the high 32 bits by the rotation operator (which is
1675 // done by rlwinm and friends).
1676 assert(InstMaskStart >= 32 && "Mask cannot start out of range");
1677 assert(InstMaskEnd >= 32 && "Mask cannot end out of range");
1678 SDValue Ops[] =
1679 { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl),
1680 getI32Imm(InstMaskStart - 32, dl), getI32Imm(InstMaskEnd - 32, dl) };
1681 return SDValue(CurDAG->getMachineNode(PPC::RLWINM8, dl, MVT::i64,
1682 Ops), 0);
1685 if (InstMaskEnd == 63) {
1686 SDValue Ops[] =
1687 { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl),
1688 getI32Imm(InstMaskStart, dl) };
1689 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Ops), 0);
1692 if (InstMaskStart == 0) {
1693 SDValue Ops[] =
1694 { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl),
1695 getI32Imm(InstMaskEnd, dl) };
1696 return SDValue(CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64, Ops), 0);
1699 if (InstMaskEnd == 63 - RLAmt) {
1700 SDValue Ops[] =
1701 { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl),
1702 getI32Imm(InstMaskStart, dl) };
1703 return SDValue(CurDAG->getMachineNode(PPC::RLDIC, dl, MVT::i64, Ops), 0);
1706 // We cannot do this with a single instruction, so we'll use two. The
1707 // problem is that we're not free to choose both a rotation amount and mask
1708 // start and end independently. We can choose an arbitrary mask start and
1709 // end, but then the rotation amount is fixed. Rotation, however, can be
1710 // inverted, and so by applying an "inverse" rotation first, we can get the
1711 // desired result.
1712 if (InstCnt) *InstCnt += 1;
1714 // The rotation mask for the second instruction must be MaskStart.
1715 unsigned RLAmt2 = MaskStart;
1716 // The first instruction must rotate V so that the overall rotation amount
1717 // is RLAmt.
1718 unsigned RLAmt1 = (64 + RLAmt - RLAmt2) % 64;
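// For example: if the desired net rotation is RLAmt = 10 and MaskStart = 20,
// then RLAmt2 = 20 and RLAmt1 = (64 + 10 - 20) % 64 = 54; rotating by 54 and
// then by 20 gives a net left rotation of 74 % 64 = 10, as required.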
1719 if (RLAmt1)
1720 V = SelectRotMask64(V, dl, RLAmt1, false, 0, 63);
1721 return SelectRotMask64(V, dl, RLAmt2, false, MaskStart, MaskEnd);
1724 // For 64-bit values, not all combinations of rotates and masks are
1725 // available. Produce a rotate-mask-and-insert if one is available.
1726 SDValue SelectRotMaskIns64(SDValue Base, SDValue V, const SDLoc &dl,
1727 unsigned RLAmt, bool Repl32, unsigned MaskStart,
1728 unsigned MaskEnd, unsigned *InstCnt = nullptr) {
1729 // In the notation used by the instructions, 'start' and 'end' are reversed
1730 // because bits are counted from high to low order.
1731 unsigned InstMaskStart = 64 - MaskEnd - 1,
1732 InstMaskEnd = 64 - MaskStart - 1;
1734 if (InstCnt) *InstCnt += 1;
1736 if (Repl32) {
1737 // This rotation amount assumes that the lower 32 bits of the quantity
1738 // are replicated in the high 32 bits by the rotation operator (which is
1739 // done by rlwinm and friends).
1740 assert(InstMaskStart >= 32 && "Mask cannot start out of range");
1741 assert(InstMaskEnd >= 32 && "Mask cannot end out of range");
1742 SDValue Ops[] =
1743 { ExtendToInt64(Base, dl), ExtendToInt64(V, dl), getI32Imm(RLAmt, dl),
1744 getI32Imm(InstMaskStart - 32, dl), getI32Imm(InstMaskEnd - 32, dl) };
1745 return SDValue(CurDAG->getMachineNode(PPC::RLWIMI8, dl, MVT::i64,
1746 Ops), 0);
1749 if (InstMaskEnd == 63 - RLAmt) {
1750 SDValue Ops[] =
1751 { ExtendToInt64(Base, dl), ExtendToInt64(V, dl), getI32Imm(RLAmt, dl),
1752 getI32Imm(InstMaskStart, dl) };
1753 return SDValue(CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops), 0);
1756 // We cannot do this with a single instruction, so we'll use two. The
1757 // problem is that we're not free to choose both a rotation amount and mask
1758 // start and end independently. We can choose an arbitrary mask start and
1759 // end, but then the rotation amount is fixed. Rotation, however, can be
1760 // inverted, and so by applying an "inverse" rotation first, we can get the
1761 // desired result.
1762 if (InstCnt) *InstCnt += 1;
1764 // The rotation mask for the second instruction must be MaskStart.
1765 unsigned RLAmt2 = MaskStart;
1766 // The first instruction must rotate V so that the overall rotation amount
1767 // is RLAmt.
1768 unsigned RLAmt1 = (64 + RLAmt - RLAmt2) % 64;
1769 if (RLAmt1)
1770 V = SelectRotMask64(V, dl, RLAmt1, false, 0, 63);
1771 return SelectRotMaskIns64(Base, V, dl, RLAmt2, false, MaskStart, MaskEnd);
1774 void SelectAndParts64(const SDLoc &dl, SDValue &Res, unsigned *InstCnt) {
1775 if (BPermRewriterNoMasking)
1776 return;
1778 // The idea here is the same as in the 32-bit version, but with additional
1779 // complications from the fact that Repl32 might be true. Because we
1780 // aggressively convert bit groups to Repl32 form (which, for small
1781 // rotation factors, involves no other change), and then coalesce, it might
1782 // be the case that a single 64-bit masking operation could handle both
1783 // some Repl32 groups and some non-Repl32 groups. If converting to Repl32
1784 // form allowed coalescing, then we must use a 32-bit rotation in order to
1785 // completely capture the new combined bit group.
1787 for (ValueRotInfo &VRI : ValueRotsVec) {
1788 uint64_t Mask = 0;
1790 // We need to add to the mask all bits from the associated bit groups.
1791 // If Repl32 is false, we need to add bits from bit groups that have
1792 // Repl32 true, but are trivially convertible to Repl32 false. Such a
1793 // group is trivially convertible if it overlaps only with the lower 32
1794 // bits, and the group has not been coalesced.
1795 auto MatchingBG = [VRI](const BitGroup &BG) {
1796 if (VRI.V != BG.V)
1797 return false;
1799 unsigned EffRLAmt = BG.RLAmt;
1800 if (!VRI.Repl32 && BG.Repl32) {
1801 if (BG.StartIdx < 32 && BG.EndIdx < 32 && BG.StartIdx <= BG.EndIdx &&
1802 !BG.Repl32Coalesced) {
1803 if (BG.Repl32CR)
1804 EffRLAmt += 32;
1805 } else {
1806 return false;
1808 } else if (VRI.Repl32 != BG.Repl32) {
1809 return false;
1812 return VRI.RLAmt == EffRLAmt;
1815 for (auto &BG : BitGroups) {
1816 if (!MatchingBG(BG))
1817 continue;
1819 if (BG.StartIdx <= BG.EndIdx) {
1820 for (unsigned i = BG.StartIdx; i <= BG.EndIdx; ++i)
1821 Mask |= (UINT64_C(1) << i);
1822 } else {
1823 for (unsigned i = BG.StartIdx; i < Bits.size(); ++i)
1824 Mask |= (UINT64_C(1) << i);
1825 for (unsigned i = 0; i <= BG.EndIdx; ++i)
1826 Mask |= (UINT64_C(1) << i);
1830 // We can use the 32-bit andi/andis technique if the mask does not
1831 // require any higher-order bits. This can save an instruction compared
1832 // to always using the general 64-bit technique.
1833 bool Use32BitInsts = isUInt<32>(Mask);
1834 // Compute the masks for andi/andis that would be necessary.
1835 unsigned ANDIMask = (Mask & UINT16_MAX),
1836 ANDISMask = (Mask >> 16) & UINT16_MAX;
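// For example, a mask of 0x00FF00FF splits into ANDIMask = 0x00FF and
// ANDISMask = 0x00FF, which takes an andi., an andis. and an or; a mask with
// any bits set above bit 31 forces the general 64-bit constant sequence.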
1838 bool NeedsRotate = VRI.RLAmt || (VRI.Repl32 && !isUInt<32>(Mask));
1840 unsigned NumAndInsts = (unsigned) NeedsRotate +
1841 (unsigned) (bool) Res;
1842 if (Use32BitInsts)
1843 NumAndInsts += (unsigned) (ANDIMask != 0) + (unsigned) (ANDISMask != 0) +
1844 (unsigned) (ANDIMask != 0 && ANDISMask != 0);
1845 else
1846 NumAndInsts += selectI64ImmInstrCount(Mask) + /* and */ 1;
1848 unsigned NumRLInsts = 0;
1849 bool FirstBG = true;
1850 bool MoreBG = false;
1851 for (auto &BG : BitGroups) {
1852 if (!MatchingBG(BG)) {
1853 MoreBG = true;
1854 continue;
1856 NumRLInsts +=
1857 SelectRotMask64Count(BG.RLAmt, BG.Repl32, BG.StartIdx, BG.EndIdx,
1858 !FirstBG);
1859 FirstBG = false;
1862 DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode() <<
1863 " RL: " << VRI.RLAmt << (VRI.Repl32 ? " (32):" : ":") <<
1864 "\n\t\t\tisel using masking: " << NumAndInsts <<
1865 " using rotates: " << NumRLInsts << "\n");
1867 // When we'd use andi/andis, we bias toward using the rotates (andi only
1868 // has a record form, and is cracked on POWER cores). However, when using
1869 // general 64-bit constant formation, bias toward the constant form,
1870 // because that exposes more opportunities for CSE.
1871 if (NumAndInsts > NumRLInsts)
1872 continue;
1873 // When merging multiple bit groups, an OR instruction is needed.
1874 // But when rotates are used, rldimi can insert the rotated value into any
1875 // register, so the OR instruction can be avoided.
1876 if ((Use32BitInsts || MoreBG) && NumAndInsts == NumRLInsts)
1877 continue;
1879 DEBUG(dbgs() << "\t\t\t\tusing masking\n");
1881 if (InstCnt) *InstCnt += NumAndInsts;
1883 SDValue VRot;
1884 // We actually need to generate a rotation if we have a non-zero rotation
1885 // factor or, in the Repl32 case, if we care about any of the
1886 // higher-order replicated bits. In the latter case, we generate a mask
1887 // backward so that it actually includes the entire 64 bits.
1888 if (VRI.RLAmt || (VRI.Repl32 && !isUInt<32>(Mask)))
1889 VRot = SelectRotMask64(VRI.V, dl, VRI.RLAmt, VRI.Repl32,
1890 VRI.Repl32 ? 31 : 0, VRI.Repl32 ? 30 : 63);
1891 else
1892 VRot = VRI.V;
1894 SDValue TotalVal;
1895 if (Use32BitInsts) {
1896 assert((ANDIMask != 0 || ANDISMask != 0) &&
1897 "No set bits in mask when using 32-bit ands for 64-bit value");
1899 SDValue ANDIVal, ANDISVal;
1900 if (ANDIMask != 0)
1901 ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo8, dl, MVT::i64,
1902 ExtendToInt64(VRot, dl),
1903 getI32Imm(ANDIMask, dl)),
1905 if (ANDISMask != 0)
1906 ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo8, dl, MVT::i64,
1907 ExtendToInt64(VRot, dl),
1908 getI32Imm(ANDISMask, dl)),
1911 if (!ANDIVal)
1912 TotalVal = ANDISVal;
1913 else if (!ANDISVal)
1914 TotalVal = ANDIVal;
1915 else
1916 TotalVal = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64,
1917 ExtendToInt64(ANDIVal, dl), ANDISVal), 0);
1918 } else {
1919 TotalVal = SDValue(selectI64Imm(CurDAG, dl, Mask), 0);
1920 TotalVal =
1921 SDValue(CurDAG->getMachineNode(PPC::AND8, dl, MVT::i64,
1922 ExtendToInt64(VRot, dl), TotalVal),
1926 if (!Res)
1927 Res = TotalVal;
1928 else
1929 Res = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64,
1930 ExtendToInt64(Res, dl), TotalVal),
1933 // Now, remove all groups with this underlying value and rotation
1934 // factor.
1935 eraseMatchingBitGroups(MatchingBG);
1939 // Instruction selection for the 64-bit case.
1940 SDNode *Select64(SDNode *N, bool LateMask, unsigned *InstCnt) {
1941 SDLoc dl(N);
1942 SDValue Res;
1944 if (InstCnt) *InstCnt = 0;
1946 // Take care of cases that should use andi/andis first.
1947 SelectAndParts64(dl, Res, InstCnt);
1949 // If we've not yet selected a 'starting' instruction, and we have no zeros
1950 // to fill in, select the (Value, RLAmt) with the highest priority (largest
1951 // number of groups), and start with this rotated value.
1952 if ((!HasZeros || LateMask) && !Res) {
1953 // If we have both Repl32 groups and non-Repl32 groups, the non-Repl32
1954 // groups will come first, and so the VRI representing the largest number
1955 // of groups might not be first (it might be the first of the Repl32 groups).
1956 unsigned MaxGroupsIdx = 0;
1957 if (!ValueRotsVec[0].Repl32) {
1958 for (unsigned i = 0, ie = ValueRotsVec.size(); i < ie; ++i)
1959 if (ValueRotsVec[i].Repl32) {
1960 if (ValueRotsVec[i].NumGroups > ValueRotsVec[0].NumGroups)
1961 MaxGroupsIdx = i;
1962 break;
1966 ValueRotInfo &VRI = ValueRotsVec[MaxGroupsIdx];
1967 bool NeedsRotate = false;
1968 if (VRI.RLAmt) {
1969 NeedsRotate = true;
1970 } else if (VRI.Repl32) {
1971 for (auto &BG : BitGroups) {
1972 if (BG.V != VRI.V || BG.RLAmt != VRI.RLAmt ||
1973 BG.Repl32 != VRI.Repl32)
1974 continue;
1976 // We don't need a rotate if the bit group is confined to the lower
1977 // 32 bits.
1978 if (BG.StartIdx < 32 && BG.EndIdx < 32 && BG.StartIdx < BG.EndIdx)
1979 continue;
1981 NeedsRotate = true;
1982 break;
1986 if (NeedsRotate)
1987 Res = SelectRotMask64(VRI.V, dl, VRI.RLAmt, VRI.Repl32,
1988 VRI.Repl32 ? 31 : 0, VRI.Repl32 ? 30 : 63,
1989 InstCnt);
1990 else
1991 Res = VRI.V;
1993 // Now, remove all groups with this underlying value and rotation factor.
1994 if (Res)
1995 eraseMatchingBitGroups([VRI](const BitGroup &BG) {
1996 return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt &&
1997 BG.Repl32 == VRI.Repl32;
2001 // Because 64-bit rotates are more flexible than inserts, we might have a
2002 // preference regarding which one we do first (to save one instruction).
2003 if (!Res)
2004 for (auto I = BitGroups.begin(), IE = BitGroups.end(); I != IE; ++I) {
2005 if (SelectRotMask64Count(I->RLAmt, I->Repl32, I->StartIdx, I->EndIdx,
2006 false) <
2007 SelectRotMask64Count(I->RLAmt, I->Repl32, I->StartIdx, I->EndIdx,
2008 true)) {
2009 if (I != BitGroups.begin()) {
2010 BitGroup BG = *I;
2011 BitGroups.erase(I);
2012 BitGroups.insert(BitGroups.begin(), BG);
2015 break;
2019 // Insert the other groups (one at a time).
2020 for (auto &BG : BitGroups) {
2021 if (!Res)
2022 Res = SelectRotMask64(BG.V, dl, BG.RLAmt, BG.Repl32, BG.StartIdx,
2023 BG.EndIdx, InstCnt);
2024 else
2025 Res = SelectRotMaskIns64(Res, BG.V, dl, BG.RLAmt, BG.Repl32,
2026 BG.StartIdx, BG.EndIdx, InstCnt);
2029 if (LateMask) {
2030 uint64_t Mask = getZerosMask();
2032 // We can use the 32-bit andi/andis technique if the mask does not
2033 // require any higher-order bits. This can save an instruction compared
2034 // to always using the general 64-bit technique.
2035 bool Use32BitInsts = isUInt<32>(Mask);
2036 // Compute the masks for andi/andis that would be necessary.
2037 unsigned ANDIMask = (Mask & UINT16_MAX),
2038 ANDISMask = (Mask >> 16) & UINT16_MAX;
2040 if (Use32BitInsts) {
2041 assert((ANDIMask != 0 || ANDISMask != 0) &&
2042 "No set bits in mask when using 32-bit ands for 64-bit value");
2044 if (InstCnt) *InstCnt += (unsigned) (ANDIMask != 0) +
2045 (unsigned) (ANDISMask != 0) +
2046 (unsigned) (ANDIMask != 0 && ANDISMask != 0);
2048 SDValue ANDIVal, ANDISVal;
2049 if (ANDIMask != 0)
2050 ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo8, dl, MVT::i64,
2051 ExtendToInt64(Res, dl), getI32Imm(ANDIMask, dl)), 0);
2052 if (ANDISMask != 0)
2053 ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo8, dl, MVT::i64,
2054 ExtendToInt64(Res, dl), getI32Imm(ANDISMask, dl)), 0);
2056 if (!ANDIVal)
2057 Res = ANDISVal;
2058 else if (!ANDISVal)
2059 Res = ANDIVal;
2060 else
2061 Res = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64,
2062 ExtendToInt64(ANDIVal, dl), ANDISVal), 0);
2063 } else {
2064 if (InstCnt) *InstCnt += selectI64ImmInstrCount(Mask) + /* and */ 1;
2066 SDValue MaskVal = SDValue(selectI64Imm(CurDAG, dl, Mask), 0);
2067 Res =
2068 SDValue(CurDAG->getMachineNode(PPC::AND8, dl, MVT::i64,
2069 ExtendToInt64(Res, dl), MaskVal), 0);
2073 return Res.getNode();
2076 SDNode *Select(SDNode *N, bool LateMask, unsigned *InstCnt = nullptr) {
2077 // Fill in BitGroups.
2078 collectBitGroups(LateMask);
2079 if (BitGroups.empty())
2080 return nullptr;
2082 // For 64-bit values, figure out when we can use 32-bit instructions.
2083 if (Bits.size() == 64)
2084 assignRepl32BitGroups();
2086 // Fill in ValueRotsVec.
2087 collectValueRotInfo();
2089 if (Bits.size() == 32) {
2090 return Select32(N, LateMask, InstCnt);
2091 } else {
2092 assert(Bits.size() == 64 && "Not 64 bits here?");
2093 return Select64(N, LateMask, InstCnt);
2096 return nullptr;
2099 void eraseMatchingBitGroups(function_ref<bool(const BitGroup &)> F) {
2100 BitGroups.erase(remove_if(BitGroups, F), BitGroups.end());
2103 SmallVector<ValueBit, 64> Bits;
2105 bool HasZeros;
2106 SmallVector<unsigned, 64> RLAmt;
2108 SmallVector<BitGroup, 16> BitGroups;
2110 DenseMap<std::pair<SDValue, unsigned>, ValueRotInfo> ValueRots;
2111 SmallVector<ValueRotInfo, 16> ValueRotsVec;
2113 SelectionDAG *CurDAG;
2115 public:
2116 BitPermutationSelector(SelectionDAG *DAG)
2117 : CurDAG(DAG) {}
2119 // Here we try to match complex bit permutations into a set of
2120 // rotate-and-shift/shift/and/or instructions, using a set of heuristics
2121 // known to produce optimal code for common cases (like i32 byte swapping).
2122 SDNode *Select(SDNode *N) {
2123 Memoizer.clear();
2124 auto Result =
2125 getValueBits(SDValue(N, 0), N->getValueType(0).getSizeInBits());
2126 if (!Result.first)
2127 return nullptr;
2128 Bits = std::move(*Result.second);
2130 DEBUG(dbgs() << "Considering bit-permutation-based instruction"
2131 " selection for: ");
2132 DEBUG(N->dump(CurDAG));
2134 // Fill in RLAmt and set HasZeros.
2135 computeRotationAmounts();
2137 if (!HasZeros)
2138 return Select(N, false);
2140 // We currently have two techniques for handling results with zeros: early
2141 // masking (the default) and late masking. Late masking is sometimes more
2142 // efficient, but because the structure of the bit groups is different, it
2143 // is hard to tell without generating both and comparing the results. With
2144 // late masking, we ignore zeros in the resulting value when inserting each
2145 // set of bit groups, and then mask in the zeros at the end. With early
2146 // masking, we only insert the non-zero parts of the result at every step.
2148 unsigned InstCnt, InstCntLateMask;
2149 DEBUG(dbgs() << "\tEarly masking:\n");
2150 SDNode *RN = Select(N, false, &InstCnt);
2151 DEBUG(dbgs() << "\t\tisel would use " << InstCnt << " instructions\n");
2153 DEBUG(dbgs() << "\tLate masking:\n");
2154 SDNode *RNLM = Select(N, true, &InstCntLateMask);
2155 DEBUG(dbgs() << "\t\tisel would use " << InstCntLateMask <<
2156 " instructions\n");
2158 if (InstCnt <= InstCntLateMask) {
2159 DEBUG(dbgs() << "\tUsing early-masking for isel\n");
2160 return RN;
2163 DEBUG(dbgs() << "\tUsing late-masking for isel\n");
2164 return RNLM;
2168 class IntegerCompareEliminator {
2169 SelectionDAG *CurDAG;
2170 PPCDAGToDAGISel *S;
2171 // Conversion type for interpreting results of a 32-bit instruction as
2172 // a 64-bit value or vice versa.
2173 enum ExtOrTruncConversion { Ext, Trunc };
2175 // Modifiers to guide how an ISD::SETCC node's result is to be computed
2176 // in a GPR.
2177 // ZExtOrig - use the original condition code, zero-extend value
2178 // ZExtInvert - invert the condition code, zero-extend value
2179 // SExtOrig - use the original condition code, sign-extend value
2180 // SExtInvert - invert the condition code, sign-extend value
2181 enum SetccInGPROpts { ZExtOrig, ZExtInvert, SExtOrig, SExtInvert };
2183 // Comparisons against zero to emit GPR code sequences for. Each of these
2184 // sequences may need to be emitted for two or more equivalent patterns.
2185 // For example (a >= 0) == (a > -1). The direction of the comparison (</>)
2186 // matters as well as the extension type: sext (-1/0), zext (1/0).
2187 // GEZExt - (zext (LHS >= 0))
2188 // GESExt - (sext (LHS >= 0))
2189 // LEZExt - (zext (LHS <= 0))
2190 // LESExt - (sext (LHS <= 0))
2191 enum ZeroCompare { GEZExt, GESExt, LEZExt, LESExt };
2193 SDNode *tryEXTEND(SDNode *N);
2194 SDNode *tryLogicOpOfCompares(SDNode *N);
2195 SDValue computeLogicOpInGPR(SDValue LogicOp);
2196 SDValue signExtendInputIfNeeded(SDValue Input);
2197 SDValue zeroExtendInputIfNeeded(SDValue Input);
2198 SDValue addExtOrTrunc(SDValue NatWidthRes, ExtOrTruncConversion Conv);
2199 SDValue getCompoundZeroComparisonInGPR(SDValue LHS, SDLoc dl,
2200 ZeroCompare CmpTy);
2201 SDValue get32BitZExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2202 int64_t RHSValue, SDLoc dl);
2203 SDValue get32BitSExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2204 int64_t RHSValue, SDLoc dl);
2205 SDValue get64BitZExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2206 int64_t RHSValue, SDLoc dl);
2207 SDValue get64BitSExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2208 int64_t RHSValue, SDLoc dl);
2209 SDValue getSETCCInGPR(SDValue Compare, SetccInGPROpts ConvOpts);
2211 public:
2212 IntegerCompareEliminator(SelectionDAG *DAG,
2213 PPCDAGToDAGISel *Sel) : CurDAG(DAG), S(Sel) {
2214 assert(CurDAG->getTargetLoweringInfo()
2215 .getPointerTy(CurDAG->getDataLayout()).getSizeInBits() == 64 &&
2216 "Only expecting to use this on 64 bit targets.");
2218 SDNode *Select(SDNode *N) {
2219 if (CmpInGPR == ICGPR_None)
2220 return nullptr;
2221 switch (N->getOpcode()) {
2222 default: break;
2223 case ISD::ZERO_EXTEND:
2224 if (CmpInGPR == ICGPR_Sext || CmpInGPR == ICGPR_SextI32 ||
2225 CmpInGPR == ICGPR_SextI64)
2226 return nullptr;
2227 case ISD::SIGN_EXTEND:
2228 if (CmpInGPR == ICGPR_Zext || CmpInGPR == ICGPR_ZextI32 ||
2229 CmpInGPR == ICGPR_ZextI64)
2230 return nullptr;
2231 return tryEXTEND(N);
2232 case ISD::AND:
2233 case ISD::OR:
2234 case ISD::XOR:
2235 return tryLogicOpOfCompares(N);
2237 return nullptr;
2241 static bool isLogicOp(unsigned Opc) {
2242 return Opc == ISD::AND || Opc == ISD::OR || Opc == ISD::XOR;
2244 // The obvious case for wanting to keep the value in a GPR. Namely, the
2245 // result of the comparison is actually needed in a GPR.
2246 SDNode *IntegerCompareEliminator::tryEXTEND(SDNode *N) {
2247 assert((N->getOpcode() == ISD::ZERO_EXTEND ||
2248 N->getOpcode() == ISD::SIGN_EXTEND) &&
2249 "Expecting a zero/sign extend node!");
2250 SDValue WideRes;
2251 // If we are zero-extending the result of a logical operation on i1
2252 // values, we can keep the values in GPRs.
2253 if (isLogicOp(N->getOperand(0).getOpcode()) &&
2254 N->getOperand(0).getValueType() == MVT::i1 &&
2255 N->getOpcode() == ISD::ZERO_EXTEND)
2256 WideRes = computeLogicOpInGPR(N->getOperand(0));
2257 else if (N->getOperand(0).getOpcode() != ISD::SETCC)
2258 return nullptr;
2259 else
2260 WideRes =
2261 getSETCCInGPR(N->getOperand(0),
2262 N->getOpcode() == ISD::SIGN_EXTEND ?
2263 SetccInGPROpts::SExtOrig : SetccInGPROpts::ZExtOrig);
2265 if (!WideRes)
2266 return nullptr;
2268 SDLoc dl(N);
2269 bool Input32Bit = WideRes.getValueType() == MVT::i32;
2270 bool Output32Bit = N->getValueType(0) == MVT::i32;
2272 NumSextSetcc += N->getOpcode() == ISD::SIGN_EXTEND ? 1 : 0;
2273 NumZextSetcc += N->getOpcode() == ISD::SIGN_EXTEND ? 0 : 1;
2275 SDValue ConvOp = WideRes;
2276 if (Input32Bit != Output32Bit)
2277 ConvOp = addExtOrTrunc(WideRes, Input32Bit ? ExtOrTruncConversion::Ext :
2278 ExtOrTruncConversion::Trunc);
2279 return ConvOp.getNode();
2282 // Attempt to perform logical operations on the results of comparisons while
2283 // keeping the values in GPRs. Without doing so, these would end up being
2284 // lowered to CR-logical operations which suffer from significant latency and
2285 // low ILP.
2286 SDNode *IntegerCompareEliminator::tryLogicOpOfCompares(SDNode *N) {
2287 if (N->getValueType(0) != MVT::i1)
2288 return nullptr;
2289 assert(isLogicOp(N->getOpcode()) &&
2290 "Expected a logic operation on setcc results.");
2291 SDValue LoweredLogical = computeLogicOpInGPR(SDValue(N, 0));
2292 if (!LoweredLogical)
2293 return nullptr;
2295 SDLoc dl(N);
2296 bool IsBitwiseNegate = LoweredLogical.getMachineOpcode() == PPC::XORI8;
2297 unsigned SubRegToExtract = IsBitwiseNegate ? PPC::sub_eq : PPC::sub_gt;
2298 SDValue CR0Reg = CurDAG->getRegister(PPC::CR0, MVT::i32);
2299 SDValue LHS = LoweredLogical.getOperand(0);
2300 SDValue RHS = LoweredLogical.getOperand(1);
2301 SDValue WideOp;
2302 SDValue OpToConvToRecForm;
2304 // Look through any 32-bit to 64-bit implicit extend nodes to find the
2305 // opcode that is input to the XORI.
2306 if (IsBitwiseNegate &&
2307 LoweredLogical.getOperand(0).getMachineOpcode() == PPC::INSERT_SUBREG)
2308 OpToConvToRecForm = LoweredLogical.getOperand(0).getOperand(1);
2309 else if (IsBitwiseNegate)
2310 // If the input to the XORI isn't an extension, that's what we're after.
2311 OpToConvToRecForm = LoweredLogical.getOperand(0);
2312 else
2313 // If this is not an XORI, it is a reg-reg logical op and we can convert
2314 // it to record-form.
2315 OpToConvToRecForm = LoweredLogical;
2317 // Get the record-form version of the node we're looking to use to get the
2318 // CR result from.
2319 uint16_t NonRecOpc = OpToConvToRecForm.getMachineOpcode();
2320 int NewOpc = PPCInstrInfo::getRecordFormOpcode(NonRecOpc);
2322 // Convert the right node to record-form. This is either the logical we're
2323 // looking at or it is the input node to the negation (if we're looking at
2324 // a bitwise negation).
2325 if (NewOpc != -1 && IsBitwiseNegate) {
2326 // The input to the XORI has a record-form. Use it.
2327 assert(LoweredLogical.getConstantOperandVal(1) == 1 &&
2328 "Expected a PPC::XORI8 only for bitwise negation.");
2329 // Emit the record-form instruction.
2330 std::vector<SDValue> Ops;
2331 for (int i = 0, e = OpToConvToRecForm.getNumOperands(); i < e; i++)
2332 Ops.push_back(OpToConvToRecForm.getOperand(i));
2334 WideOp =
2335 SDValue(CurDAG->getMachineNode(NewOpc, dl,
2336 OpToConvToRecForm.getValueType(),
2337 MVT::Glue, Ops), 0);
2338 } else {
2339 assert((NewOpc != -1 || !IsBitwiseNegate) &&
2340 "No record form available for AND8/OR8/XOR8?");
2341 WideOp =
2342 SDValue(CurDAG->getMachineNode(NewOpc == -1 ? PPC::ANDIo8 : NewOpc, dl,
2343 MVT::i64, MVT::Glue, LHS, RHS), 0);
2346 // Select this node to a single bit from CR0 set by the record-form node
2347 // just created. For bitwise negation, use the EQ bit, which is equivalent
2348 // to negating the result (i.e. the bit is set when the result of the
2349 // operation is zero).
2350 SDValue SRIdxVal =
2351 CurDAG->getTargetConstant(SubRegToExtract, dl, MVT::i32);
2352 SDValue CRBit =
2353 SDValue(CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
2354 MVT::i1, CR0Reg, SRIdxVal,
2355 WideOp.getValue(1)), 0);
2356 return CRBit.getNode();
2359 // Lower a logical operation on i1 values into a GPR sequence if possible.
2360 // The result can be kept in a GPR if requested.
2361 // Three types of inputs can be handled:
2362 // - SETCC
2363 // - TRUNCATE
2364 // - Logical operation (AND/OR/XOR)
2365 // There is also a special case that is handled (namely a complement operation
2366 // achieved with xor %a, -1).
2367 SDValue IntegerCompareEliminator::computeLogicOpInGPR(SDValue LogicOp) {
2368 assert(isLogicOp(LogicOp.getOpcode()) &&
2369 "Can only handle logic operations here.");
2370 assert(LogicOp.getValueType() == MVT::i1 &&
2371 "Can only handle logic operations on i1 values here.");
2372 SDLoc dl(LogicOp);
2373 SDValue LHS, RHS;
2375 // Special case: xor %a, -1
2376 bool IsBitwiseNegation = isBitwiseNot(LogicOp);
2378 // Produces a GPR sequence for each operand of the binary logic operation.
2379 // For SETCC, it produces the respective comparison; for TRUNCATE, it truncates
2380 // the value in a GPR; and for logic operations, it recursively produces
2381 // a GPR sequence for the operation.
2382 auto getLogicOperand = [&] (SDValue Operand) -> SDValue {
2383 unsigned OperandOpcode = Operand.getOpcode();
2384 if (OperandOpcode == ISD::SETCC)
2385 return getSETCCInGPR(Operand, SetccInGPROpts::ZExtOrig);
2386 else if (OperandOpcode == ISD::TRUNCATE) {
2387 SDValue InputOp = Operand.getOperand(0);
2388 EVT InVT = InputOp.getValueType();
2389 return SDValue(CurDAG->getMachineNode(InVT == MVT::i32 ? PPC::RLDICL_32 :
2390 PPC::RLDICL, dl, InVT, InputOp,
2391 S->getI64Imm(0, dl),
2392 S->getI64Imm(63, dl)), 0);
2393 } else if (isLogicOp(OperandOpcode))
2394 return computeLogicOpInGPR(Operand);
2395 return SDValue();
2397 LHS = getLogicOperand(LogicOp.getOperand(0));
2398 RHS = getLogicOperand(LogicOp.getOperand(1));
2400 // If a GPR sequence can't be produced for the LHS we can't proceed.
2401 // Not producing a GPR sequence for the RHS is only a problem if this isn't
2402 // a bitwise negation operation.
2403 if (!LHS || (!RHS && !IsBitwiseNegation))
2404 return SDValue();
2406 NumLogicOpsOnComparison++;
2408 // We will use the inputs as 64-bit values.
2409 if (LHS.getValueType() == MVT::i32)
2410 LHS = addExtOrTrunc(LHS, ExtOrTruncConversion::Ext);
2411 if (!IsBitwiseNegation && RHS.getValueType() == MVT::i32)
2412 RHS = addExtOrTrunc(RHS, ExtOrTruncConversion::Ext);
2414 unsigned NewOpc;
2415 switch (LogicOp.getOpcode()) {
2416 default: llvm_unreachable("Unknown logic operation.");
2417 case ISD::AND: NewOpc = PPC::AND8; break;
2418 case ISD::OR: NewOpc = PPC::OR8; break;
2419 case ISD::XOR: NewOpc = PPC::XOR8; break;
2422 if (IsBitwiseNegation) {
2423 RHS = S->getI64Imm(1, dl);
2424 NewOpc = PPC::XORI8;
2427 return SDValue(CurDAG->getMachineNode(NewOpc, dl, MVT::i64, LHS, RHS), 0);
2431 /// If the value isn't guaranteed to be sign-extended to 64-bits, extend it.
2432 /// Otherwise just reinterpret it as a 64-bit value.
2433 /// Useful when emitting comparison code for 32-bit values without using
2434 /// the compare instruction (which only considers the lower 32-bits).
2435 SDValue IntegerCompareEliminator::signExtendInputIfNeeded(SDValue Input) {
2436 assert(Input.getValueType() == MVT::i32 &&
2437 "Can only sign-extend 32-bit values here.");
2438 unsigned Opc = Input.getOpcode();
2440 // The value was sign extended and then truncated to 32-bits. No need to
2441 // sign extend it again.
2442 if (Opc == ISD::TRUNCATE &&
2443 (Input.getOperand(0).getOpcode() == ISD::AssertSext ||
2444 Input.getOperand(0).getOpcode() == ISD::SIGN_EXTEND))
2445 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext);
2447 LoadSDNode *InputLoad = dyn_cast<LoadSDNode>(Input);
2448 // The input is a sign-extending load. All ppc sign-extending loads
2449 // sign-extend to the full 64-bits.
2450 if (InputLoad && InputLoad->getExtensionType() == ISD::SEXTLOAD)
2451 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext);
2453 ConstantSDNode *InputConst = dyn_cast<ConstantSDNode>(Input);
2454 // We don't sign-extend constants.
2455 if (InputConst)
2456 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext);
2458 SDLoc dl(Input);
2459 SignExtensionsAdded++;
2460 return SDValue(CurDAG->getMachineNode(PPC::EXTSW_32_64, dl,
2461 MVT::i64, Input), 0);
2464 /// If the value isn't guaranteed to be zero-extended to 64-bits, extend it.
2465 /// Otherwise just reinterpret it as a 64-bit value.
2466 /// Useful when emitting comparison code for 32-bit values without using
2467 /// the compare instruction (which only considers the lower 32-bits).
2468 SDValue IntegerCompareEliminator::zeroExtendInputIfNeeded(SDValue Input) {
2469 assert(Input.getValueType() == MVT::i32 &&
2470 "Can only zero-extend 32-bit values here.");
2471 unsigned Opc = Input.getOpcode();
2473 // The only conditions under which we can omit the actual extend instruction:
2474 // - The value is a non-negative constant
2475 // - The value comes from a load that isn't a sign-extending load
2476 // An ISD::TRUNCATE needs to be zero-extended unless it is fed by a zext.
2477 bool IsTruncateOfZExt = Opc == ISD::TRUNCATE &&
2478 (Input.getOperand(0).getOpcode() == ISD::AssertZext ||
2479 Input.getOperand(0).getOpcode() == ISD::ZERO_EXTEND);
2480 if (IsTruncateOfZExt)
2481 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext);
2483 ConstantSDNode *InputConst = dyn_cast<ConstantSDNode>(Input);
2484 if (InputConst && InputConst->getSExtValue() >= 0)
2485 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext);
2487 LoadSDNode *InputLoad = dyn_cast<LoadSDNode>(Input);
2488 // The input is a load that doesn't sign-extend (it will be zero-extended).
2489 if (InputLoad && InputLoad->getExtensionType() != ISD::SEXTLOAD)
2490 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext);
2492 // None of the above, need to zero-extend.
2493 SDLoc dl(Input);
2494 ZeroExtensionsAdded++;
2495 return SDValue(CurDAG->getMachineNode(PPC::RLDICL_32_64, dl, MVT::i64, Input,
2496 S->getI64Imm(0, dl),
2497 S->getI64Imm(32, dl)), 0);
2500 // Handle a 32-bit value in a 64-bit register and vice-versa. These are of
2501 // course not actual zero/sign extensions that will generate machine code,
2502 // they're just a way to reinterpret a 32 bit value in a register as a
2503 // 64 bit value and vice-versa.
2504 SDValue IntegerCompareEliminator::addExtOrTrunc(SDValue NatWidthRes,
2505 ExtOrTruncConversion Conv) {
2506 SDLoc dl(NatWidthRes);
2508 // For reinterpreting 32-bit values as 64 bit values, we generate
2509 // INSERT_SUBREG IMPLICIT_DEF:i64, <input>, TargetConstant:i32<1>
2510 if (Conv == ExtOrTruncConversion::Ext) {
2511 SDValue ImDef(CurDAG->getMachineNode(PPC::IMPLICIT_DEF, dl, MVT::i64), 0);
2512 SDValue SubRegIdx =
2513 CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32);
2514 return SDValue(CurDAG->getMachineNode(PPC::INSERT_SUBREG, dl, MVT::i64,
2515 ImDef, NatWidthRes, SubRegIdx), 0);
2518 assert(Conv == ExtOrTruncConversion::Trunc &&
2519 "Unknown convertion between 32 and 64 bit values.");
2520 // For reinterpreting 64-bit values as 32-bit values, we just need to
2521 // EXTRACT_SUBREG (i.e. extract the low word).
2522 SDValue SubRegIdx =
2523 CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32);
2524 return SDValue(CurDAG->getMachineNode(PPC::EXTRACT_SUBREG, dl, MVT::i32,
2525 NatWidthRes, SubRegIdx), 0);
2528 // Produce a GPR sequence for compound comparisons (<=, >=) against zero.
2529 // Handle both zero-extensions and sign-extensions.
2530 SDValue
2531 IntegerCompareEliminator::getCompoundZeroComparisonInGPR(SDValue LHS, SDLoc dl,
2532 ZeroCompare CmpTy) {
2533 EVT InVT = LHS.getValueType();
2534 bool Is32Bit = InVT == MVT::i32;
2535 SDValue ToExtend;
2537 // Produce the value that needs to be either zero or sign extended.
2538 switch (CmpTy) {
2539 case ZeroCompare::GEZExt:
2540 case ZeroCompare::GESExt:
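// The sign bit of (nor %a, %a), i.e. ~%a, is set exactly when %a >= 0, so
// shifting that bit down later (logically for GEZExt, arithmetically for
// GESExt) yields the desired 0/1 or 0/-1 result.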
2541 ToExtend = SDValue(CurDAG->getMachineNode(Is32Bit ? PPC::NOR : PPC::NOR8,
2542 dl, InVT, LHS, LHS), 0);
2543 break;
2544 case ZeroCompare::LEZExt:
2545 case ZeroCompare::LESExt: {
2546 if (Is32Bit) {
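// For the 32-bit LE forms, (- %a) has its sign bit set exactly when %a > 0
// (with %a sign-extended to 64 bits), so this first computes (%a > 0); the
// xori/addi at the end of this function then flips it into the <= 0 form.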
2547 // Upper 32 bits cannot be undefined for this sequence.
2548 LHS = signExtendInputIfNeeded(LHS);
2549 SDValue Neg =
2550 SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, LHS), 0);
2551 ToExtend =
2552 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64,
2553 Neg, S->getI64Imm(1, dl),
2554 S->getI64Imm(63, dl)), 0);
2555 } else {
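// For the 64-bit LE forms, the sign bit of (%a | (%a + -1)) is set exactly
// when %a <= 0: a negative %a sets it directly, %a == 0 sets it via the -1,
// and a positive %a leaves it clear in both terms.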
2556 SDValue Addi =
2557 SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, LHS,
2558 S->getI64Imm(~0ULL, dl)), 0);
2559 ToExtend = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64,
2560 Addi, LHS), 0);
2562 break;
2566 // For 64-bit sequences, the extensions are the same for the GE/LE cases.
2567 if (!Is32Bit &&
2568 (CmpTy == ZeroCompare::GEZExt || CmpTy == ZeroCompare::LEZExt))
2569 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64,
2570 ToExtend, S->getI64Imm(1, dl),
2571 S->getI64Imm(63, dl)), 0);
2572 if (!Is32Bit &&
2573 (CmpTy == ZeroCompare::GESExt || CmpTy == ZeroCompare::LESExt))
2574 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, ToExtend,
2575 S->getI64Imm(63, dl)), 0);
2577 assert(Is32Bit && "Should have handled the 64-bit sequences above.");
2578 // For 32-bit sequences, the extensions differ between GE/LE cases.
2579 switch (CmpTy) {
2580 case ZeroCompare::GEZExt: {
2581 SDValue ShiftOps[] = { ToExtend, S->getI32Imm(1, dl), S->getI32Imm(31, dl),
2582 S->getI32Imm(31, dl) };
2583 return SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32,
2584 ShiftOps), 0);
2586 case ZeroCompare::GESExt:
2587 return SDValue(CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, ToExtend,
2588 S->getI32Imm(31, dl)), 0);
2589 case ZeroCompare::LEZExt:
2590 return SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64, ToExtend,
2591 S->getI32Imm(1, dl)), 0);
2592 case ZeroCompare::LESExt:
2593 return SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, ToExtend,
2594 S->getI32Imm(-1, dl)), 0);
2597 // The above switch covers all the enumerators, so it cannot have a default
2598 // clause without triggering a compiler warning.
2599 llvm_unreachable("Unknown zero-comparison type.");
2602 /// Produces a zero-extended result of comparing two 32-bit values according to
2603 /// the passed condition code.
2604 SDValue
2605 IntegerCompareEliminator::get32BitZExtCompare(SDValue LHS, SDValue RHS,
2606 ISD::CondCode CC,
2607 int64_t RHSValue, SDLoc dl) {
2608 if (CmpInGPR == ICGPR_I64 || CmpInGPR == ICGPR_SextI64 ||
2609 CmpInGPR == ICGPR_ZextI64 || CmpInGPR == ICGPR_Sext)
2610 return SDValue();
2611 bool IsRHSZero = RHSValue == 0;
2612 bool IsRHSOne = RHSValue == 1;
2613 bool IsRHSNegOne = RHSValue == -1LL;
2614 switch (CC) {
2615 default: return SDValue();
2616 case ISD::SETEQ: {
2617 // (zext (setcc %a, %b, seteq)) -> (lshr (cntlzw (xor %a, %b)), 5)
2618 // (zext (setcc %a, 0, seteq)) -> (lshr (cntlzw %a), 5)
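// cntlzw returns 32 only when its input is zero, and 32 >> 5 == 1 while any
// count in [0, 31] shifts down to 0, so this produces the i1 equality result
// directly in a GPR.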
2619 SDValue Xor = IsRHSZero ? LHS :
2620 SDValue(CurDAG->getMachineNode(PPC::XOR, dl, MVT::i32, LHS, RHS), 0);
2621 SDValue Clz =
2622 SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Xor), 0);
2623 SDValue ShiftOps[] = { Clz, S->getI32Imm(27, dl), S->getI32Imm(5, dl),
2624 S->getI32Imm(31, dl) };
2625 return SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32,
2626 ShiftOps), 0);
2628 case ISD::SETNE: {
2629 // (zext (setcc %a, %b, setne)) -> (xor (lshr (cntlzw (xor %a, %b)), 5), 1)
2630 // (zext (setcc %a, 0, setne)) -> (xor (lshr (cntlzw %a), 5), 1)
2631 SDValue Xor = IsRHSZero ? LHS :
2632 SDValue(CurDAG->getMachineNode(PPC::XOR, dl, MVT::i32, LHS, RHS), 0);
2633 SDValue Clz =
2634 SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Xor), 0);
2635 SDValue ShiftOps[] = { Clz, S->getI32Imm(27, dl), S->getI32Imm(5, dl),
2636 S->getI32Imm(31, dl) };
2637 SDValue Shift =
2638 SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, ShiftOps), 0);
2639 return SDValue(CurDAG->getMachineNode(PPC::XORI, dl, MVT::i32, Shift,
2640 S->getI32Imm(1, dl)), 0);
2642 case ISD::SETGE: {
2643 // (zext (setcc %a, %b, setge)) -> (xor (lshr (sub %a, %b), 63), 1)
2644 // (zext (setcc %a, 0, setge)) -> (lshr (~ %a), 31)
2645 if(IsRHSZero)
2646 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GEZExt);
2648 // Not a special case (i.e. RHS == 0). Handle (%a >= %b) as (%b <= %a)
2649 // by swapping inputs and falling through.
2650 std::swap(LHS, RHS);
2651 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
2652 IsRHSZero = RHSConst && RHSConst->isNullValue();
2653 LLVM_FALLTHROUGH;
2655 case ISD::SETLE: {
2656 if (CmpInGPR == ICGPR_NonExtIn)
2657 return SDValue();
2658 // (zext (setcc %a, %b, setle)) -> (xor (lshr (sub %b, %a), 63), 1)
2659 // (zext (setcc %a, 0, setle)) -> (xor (lshr (- %a), 63), 1)
2660 if(IsRHSZero) {
2661 if (CmpInGPR == ICGPR_NonExtIn)
2662 return SDValue();
2663 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LEZExt);
2666 // The upper 32-bits of the register can't be undefined for this sequence.
2667 LHS = signExtendInputIfNeeded(LHS);
2668 RHS = signExtendInputIfNeeded(RHS);
2669 SDValue Sub =
2670 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, LHS, RHS), 0);
2671 SDValue Shift =
2672 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Sub,
2673 S->getI64Imm(1, dl), S->getI64Imm(63, dl)),
2675 return
2676 SDValue(CurDAG->getMachineNode(PPC::XORI8, dl,
2677 MVT::i64, Shift, S->getI32Imm(1, dl)), 0);
2679 case ISD::SETGT: {
2680 // (zext (setcc %a, %b, setgt)) -> (lshr (sub %b, %a), 63)
2681 // (zext (setcc %a, -1, setgt)) -> (lshr (~ %a), 31)
2682 // (zext (setcc %a, 0, setgt)) -> (lshr (- %a), 63)
2683 // Handle SETGT -1 (which is equivalent to SETGE 0).
2684 if (IsRHSNegOne)
2685 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GEZExt);
2687 if (IsRHSZero) {
2688 if (CmpInGPR == ICGPR_NonExtIn)
2689 return SDValue();
2690 // The upper 32-bits of the register can't be undefined for this sequence.
2691 LHS = signExtendInputIfNeeded(LHS);
2692 RHS = signExtendInputIfNeeded(RHS);
2693 SDValue Neg =
2694 SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, LHS), 0);
2695 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64,
2696 Neg, S->getI32Imm(1, dl), S->getI32Imm(63, dl)), 0);
2698 // Not a special case (i.e. RHS == 0 or RHS == -1). Handle (%a > %b) as
2699 // (%b < %a) by swapping inputs and falling through.
2700 std::swap(LHS, RHS);
2701 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
2702 IsRHSZero = RHSConst && RHSConst->isNullValue();
2703 IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1;
2704 LLVM_FALLTHROUGH;
2706 case ISD::SETLT: {
2707 // (zext (setcc %a, %b, setlt)) -> (lshr (sub %a, %b), 63)
2708 // (zext (setcc %a, 1, setlt)) -> (xor (lshr (- %a), 63), 1)
2709 // (zext (setcc %a, 0, setlt)) -> (lshr %a, 31)
2710 // Handle SETLT 1 (which is equivalent to SETLE 0).
2711 if (IsRHSOne) {
2712 if (CmpInGPR == ICGPR_NonExtIn)
2713 return SDValue();
2714 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LEZExt);
2717 if (IsRHSZero) {
2718 SDValue ShiftOps[] = { LHS, S->getI32Imm(1, dl), S->getI32Imm(31, dl),
2719 S->getI32Imm(31, dl) };
2720 return SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32,
2721 ShiftOps), 0);
2724 if (CmpInGPR == ICGPR_NonExtIn)
2725 return SDValue();
2726 // The upper 32-bits of the register can't be undefined for this sequence.
2727 LHS = signExtendInputIfNeeded(LHS);
2728 RHS = signExtendInputIfNeeded(RHS);
2729 SDValue SUBFNode =
2730 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, RHS, LHS), 0);
2731 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64,
2732 SUBFNode, S->getI64Imm(1, dl),
2733 S->getI64Imm(63, dl)), 0);
2735 case ISD::SETUGE:
2736 // (zext (setcc %a, %b, setuge)) -> (xor (lshr (sub %b, %a), 63), 1)
2737 // (zext (setcc %a, %b, setule)) -> (xor (lshr (sub %a, %b), 63), 1)
2738 std::swap(LHS, RHS);
2739 LLVM_FALLTHROUGH;
2740 case ISD::SETULE: {
2741 if (CmpInGPR == ICGPR_NonExtIn)
2742 return SDValue();
2743 // The upper 32-bits of the register can't be undefined for this sequence.
2744 LHS = zeroExtendInputIfNeeded(LHS);
2745 RHS = zeroExtendInputIfNeeded(RHS);
2746 SDValue Subtract =
2747 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, LHS, RHS), 0);
2748 SDValue SrdiNode =
2749 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64,
2750 Subtract, S->getI64Imm(1, dl),
2751 S->getI64Imm(63, dl)), 0);
2752 return SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64, SrdiNode,
2753 S->getI32Imm(1, dl)), 0);
2755 case ISD::SETUGT:
2756 // (zext (setcc %a, %b, setugt)) -> (lshr (sub %b, %a), 63)
2757 // (zext (setcc %a, %b, setult)) -> (lshr (sub %a, %b), 63)
2758 std::swap(LHS, RHS);
2759 LLVM_FALLTHROUGH;
2760 case ISD::SETULT: {
2761 if (CmpInGPR == ICGPR_NonExtIn)
2762 return SDValue();
2763 // The upper 32-bits of the register can't be undefined for this sequence.
2764 LHS = zeroExtendInputIfNeeded(LHS);
2765 RHS = zeroExtendInputIfNeeded(RHS);
2766 SDValue Subtract =
2767 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, RHS, LHS), 0);
2768 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64,
2769 Subtract, S->getI64Imm(1, dl),
2770 S->getI64Imm(63, dl)), 0);
2775 /// Produces a sign-extended result of comparing two 32-bit values according to
2776 /// the passed condition code.
2777 SDValue
2778 IntegerCompareEliminator::get32BitSExtCompare(SDValue LHS, SDValue RHS,
2779 ISD::CondCode CC,
2780 int64_t RHSValue, SDLoc dl) {
2781 if (CmpInGPR == ICGPR_I64 || CmpInGPR == ICGPR_SextI64 ||
2782 CmpInGPR == ICGPR_ZextI64 || CmpInGPR == ICGPR_Zext)
2783 return SDValue();
2784 bool IsRHSZero = RHSValue == 0;
2785 bool IsRHSOne = RHSValue == 1;
2786 bool IsRHSNegOne = RHSValue == -1LL;
2788 switch (CC) {
2789 default: return SDValue();
2790 case ISD::SETEQ: {
2791 // (sext (setcc %a, %b, seteq)) ->
2792 // (ashr (shl (ctlz (xor %a, %b)), 58), 63)
2793 // (sext (setcc %a, 0, seteq)) ->
2794 // (ashr (shl (ctlz %a), 58), 63)
2795 SDValue CountInput = IsRHSZero ? LHS :
2796 SDValue(CurDAG->getMachineNode(PPC::XOR, dl, MVT::i32, LHS, RHS), 0);
2797 SDValue Cntlzw =
2798 SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, CountInput), 0);
2799 SDValue SHLOps[] = { Cntlzw, S->getI32Imm(27, dl),
2800 S->getI32Imm(5, dl), S->getI32Imm(31, dl) };
2801 SDValue Slwi =
2802 SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, SHLOps), 0);
2803 return SDValue(CurDAG->getMachineNode(PPC::NEG, dl, MVT::i32, Slwi), 0);
2805 case ISD::SETNE: {
2806 // Bitwise xor the operands, count leading zeros, shift right by 5 bits,
2807 // flip the bit, and finally take the 2's complement.
2808 // (sext (setcc %a, %b, setne)) ->
2809 // (neg (xor (lshr (ctlz (xor %a, %b)), 5), 1))
2810 // Same as above, but the first xor is not needed.
2811 // (sext (setcc %a, 0, setne)) ->
2812 // (neg (xor (lshr (ctlz %a), 5), 1))
2813 SDValue Xor = IsRHSZero ? LHS :
2814 SDValue(CurDAG->getMachineNode(PPC::XOR, dl, MVT::i32, LHS, RHS), 0);
2815 SDValue Clz =
2816 SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Xor), 0);
2817 SDValue ShiftOps[] =
2818 { Clz, S->getI32Imm(27, dl), S->getI32Imm(5, dl), S->getI32Imm(31, dl) };
2819 SDValue Shift =
2820 SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, ShiftOps), 0);
2821 SDValue Xori =
2822 SDValue(CurDAG->getMachineNode(PPC::XORI, dl, MVT::i32, Shift,
2823 S->getI32Imm(1, dl)), 0);
2824 return SDValue(CurDAG->getMachineNode(PPC::NEG, dl, MVT::i32, Xori), 0);
2826 case ISD::SETGE: {
2827 // (sext (setcc %a, %b, setge)) -> (add (lshr (sub %a, %b), 63), -1)
2828 // (sext (setcc %a, 0, setge)) -> (ashr (~ %a), 31)
2829 if (IsRHSZero)
2830 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GESExt);
2832 // Not a special case (i.e. RHS == 0). Handle (%a >= %b) as (%b <= %a)
2833 // by swapping inputs and falling through.
2834 std::swap(LHS, RHS);
2835 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
2836 IsRHSZero = RHSConst && RHSConst->isNullValue();
2837 LLVM_FALLTHROUGH;
2839 case ISD::SETLE: {
2840 if (CmpInGPR == ICGPR_NonExtIn)
2841 return SDValue();
2842 // (sext (setcc %a, %b, setle)) -> (add (lshr (sub %b, %a), 63), -1)
2843 // (sext (setcc %a, 0, setle)) -> (add (lshr (- %a), 63), -1)
2844 if (IsRHSZero)
2845 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LESExt);
2847 // The upper 32-bits of the register can't be undefined for this sequence.
2848 LHS = signExtendInputIfNeeded(LHS);
2849 RHS = signExtendInputIfNeeded(RHS);
2850 SDValue SUBFNode =
2851 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, MVT::Glue,
2852 LHS, RHS), 0);
2853 SDValue Srdi =
2854 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64,
2855 SUBFNode, S->getI64Imm(1, dl),
2856 S->getI64Imm(63, dl)), 0);
2857 return SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, Srdi,
2858 S->getI32Imm(-1, dl)), 0);
2860 case ISD::SETGT: {
2861 // (sext (setcc %a, %b, setgt)) -> (ashr (sub %b, %a), 63)
2862 // (sext (setcc %a, -1, setgt)) -> (ashr (~ %a), 31)
2863 // (sext (setcc %a, 0, setgt)) -> (ashr (- %a), 63)
2864 if (IsRHSNegOne)
2865 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GESExt);
2866 if (IsRHSZero) {
2867 if (CmpInGPR == ICGPR_NonExtIn)
2868 return SDValue();
2869 // The upper 32-bits of the register can't be undefined for this sequence.
2870 LHS = signExtendInputIfNeeded(LHS);
2871 RHS = signExtendInputIfNeeded(RHS);
2872 SDValue Neg =
2873 SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, LHS), 0);
2874 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, Neg,
2875 S->getI64Imm(63, dl)), 0);
2877 // Not a special case (i.e. RHS == 0 or RHS == -1). Handle (%a > %b) as
2878 // (%b < %a) by swapping inputs and falling through.
2879 std::swap(LHS, RHS);
2880 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
2881 IsRHSZero = RHSConst && RHSConst->isNullValue();
2882 IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1;
2883 LLVM_FALLTHROUGH;
2885 case ISD::SETLT: {
2886 // (sext (setcc %a, %b, setlt)) -> (ashr (sub %a, %b), 63)
2887 // (sext (setcc %a, 1, setlt)) -> (add (lshr (- %a), 63), -1)
2888 // (sext (setcc %a, 0, setlt)) -> (ashr %a, 31)
2889 if (IsRHSOne) {
2890 if (CmpInGPR == ICGPR_NonExtIn)
2891 return SDValue();
2892 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LESExt);
2894 if (IsRHSZero)
2895 return SDValue(CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, LHS,
2896 S->getI32Imm(31, dl)), 0);
2898 if (CmpInGPR == ICGPR_NonExtIn)
2899 return SDValue();
2900 // The upper 32-bits of the register can't be undefined for this sequence.
2901 LHS = signExtendInputIfNeeded(LHS);
2902 RHS = signExtendInputIfNeeded(RHS);
2903 SDValue SUBFNode =
2904 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, RHS, LHS), 0);
2905 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64,
2906 SUBFNode, S->getI64Imm(63, dl)), 0);
2908 case ISD::SETUGE:
2909 // (sext (setcc %a, %b, setuge)) -> (add (lshr (sub %a, %b), 63), -1)
2910 // (sext (setcc %a, %b, setule)) -> (add (lshr (sub %b, %a), 63), -1)
2911 std::swap(LHS, RHS);
2912 LLVM_FALLTHROUGH;
2913 case ISD::SETULE: {
2914 if (CmpInGPR == ICGPR_NonExtIn)
2915 return SDValue();
2916 // The upper 32-bits of the register can't be undefined for this sequence.
2917 LHS = zeroExtendInputIfNeeded(LHS);
2918 RHS = zeroExtendInputIfNeeded(RHS);
2919 SDValue Subtract =
2920 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, LHS, RHS), 0);
2921 SDValue Shift =
2922 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Subtract,
2923 S->getI32Imm(1, dl), S->getI32Imm(63,dl)),
2925 return SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, Shift,
2926 S->getI32Imm(-1, dl)), 0);
2928 case ISD::SETUGT:
2929 // (sext (setcc %a, %b, setugt)) -> (ashr (sub %b, %a), 63)
2930 // (sext (setcc %a, %b, setult)) -> (ashr (sub %a, %b), 63)
2931 std::swap(LHS, RHS);
2932 LLVM_FALLTHROUGH;
2933 case ISD::SETULT: {
2934 if (CmpInGPR == ICGPR_NonExtIn)
2935 return SDValue();
2936 // The upper 32-bits of the register can't be undefined for this sequence.
2937 LHS = zeroExtendInputIfNeeded(LHS);
2938 RHS = zeroExtendInputIfNeeded(RHS);
2939 SDValue Subtract =
2940 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, RHS, LHS), 0);
2941 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64,
2942 Subtract, S->getI64Imm(63, dl)), 0);
2947 /// Produces a zero-extended result of comparing two 64-bit values according to
2948 /// the passed condition code.
2949 SDValue
2950 IntegerCompareEliminator::get64BitZExtCompare(SDValue LHS, SDValue RHS,
2951 ISD::CondCode CC,
2952 int64_t RHSValue, SDLoc dl) {
2953 if (CmpInGPR == ICGPR_I32 || CmpInGPR == ICGPR_SextI32 ||
2954 CmpInGPR == ICGPR_ZextI32 || CmpInGPR == ICGPR_Sext)
2955 return SDValue();
2956 bool IsRHSZero = RHSValue == 0;
2957 bool IsRHSOne = RHSValue == 1;
2958 bool IsRHSNegOne = RHSValue == -1LL;
2959 switch (CC) {
2960 default: return SDValue();
2961 case ISD::SETEQ: {
2962 // (zext (setcc %a, %b, seteq)) -> (lshr (ctlz (xor %a, %b)), 6)
2963 // (zext (setcc %a, 0, seteq)) -> (lshr (ctlz %a), 6)
2964 SDValue Xor = IsRHSZero ? LHS :
2965 SDValue(CurDAG->getMachineNode(PPC::XOR8, dl, MVT::i64, LHS, RHS), 0);
2966 SDValue Clz =
2967 SDValue(CurDAG->getMachineNode(PPC::CNTLZD, dl, MVT::i64, Xor), 0);
2968 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Clz,
2969 S->getI64Imm(58, dl),
2970 S->getI64Imm(63, dl)), 0);
2972 case ISD::SETNE: {
2973 // {addc.reg, addc.CA} = (addcarry (xor %a, %b), -1)
2974 // (zext (setcc %a, %b, setne)) -> (sube addc.reg, addc.reg, addc.CA)
2975 // {addcz.reg, addcz.CA} = (addcarry %a, -1)
2976 // (zext (setcc %a, 0, setne)) -> (sube addcz.reg, addcz.reg, addcz.CA)
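// The addic with -1 sets the carry exactly when the xor is non-zero, and the
// subfe then computes Xor - (Xor - 1) - 1 + CA == CA, recovering that carry
// as a 0/1 value in a GPR.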
2977 SDValue Xor = IsRHSZero ? LHS :
2978 SDValue(CurDAG->getMachineNode(PPC::XOR8, dl, MVT::i64, LHS, RHS), 0);
2979 SDValue AC =
2980 SDValue(CurDAG->getMachineNode(PPC::ADDIC8, dl, MVT::i64, MVT::Glue,
2981 Xor, S->getI32Imm(~0U, dl)), 0);
2982 return SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, AC,
2983 Xor, AC.getValue(1)), 0);
2985 case ISD::SETGE: {
2986 // {subc.reg, subc.CA} = (subcarry %a, %b)
2987 // (zext (setcc %a, %b, setge)) ->
2988 // (adde (lshr %b, 63), (ashr %a, 63), subc.CA)
2989 // (zext (setcc %a, 0, setge)) -> (lshr (~ %a), 63)
2990 if (IsRHSZero)
2991 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GEZExt);
2992 std::swap(LHS, RHS);
2993 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
2994 IsRHSZero = RHSConst && RHSConst->isNullValue();
2995 LLVM_FALLTHROUGH;
2997 case ISD::SETLE: {
2998 // {subc.reg, subc.CA} = (subcarry %b, %a)
2999 // (zext (setcc %a, %b, setle)) ->
3000 // (adde (lshr %a, 63), (ashr %b, 63), subc.CA)
3001 // (zext (setcc %a, 0, setle)) -> (lshr (or %a, (add %a, -1)), 63)
3002 if (IsRHSZero)
3003 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LEZExt);
3004 SDValue ShiftL =
3005 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, LHS,
3006 S->getI64Imm(1, dl),
3007 S->getI64Imm(63, dl)), 0);
3008 SDValue ShiftR =
3009 SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, RHS,
3010 S->getI64Imm(63, dl)), 0);
3011 SDValue SubtractCarry =
3012 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue,
3013 LHS, RHS), 1);
3014 return SDValue(CurDAG->getMachineNode(PPC::ADDE8, dl, MVT::i64, MVT::Glue,
3015 ShiftR, ShiftL, SubtractCarry), 0);
3017 case ISD::SETGT: {
3018 // {subc.reg, subc.CA} = (subcarry %b, %a)
3019 // (zext (setcc %a, %b, setgt)) ->
3020 // (xor (adde (lshr %a, 63), (ashr %b, 63), subc.CA), 1)
3021 // (zext (setcc %a, 0, setgt)) -> (lshr (nor (add %a, -1), %a), 63)
3022 if (IsRHSNegOne)
3023 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GEZExt);
3024 if (IsRHSZero) {
3025 SDValue Addi =
3026 SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, LHS,
3027 S->getI64Imm(~0ULL, dl)), 0);
3028 SDValue Nor =
3029 SDValue(CurDAG->getMachineNode(PPC::NOR8, dl, MVT::i64, Addi, LHS), 0);
3030 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Nor,
3031 S->getI64Imm(1, dl),
3032 S->getI64Imm(63, dl)), 0);
3034 std::swap(LHS, RHS);
3035 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
3036 IsRHSZero = RHSConst && RHSConst->isNullValue();
3037 IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1;
3038 LLVM_FALLTHROUGH;
3040 case ISD::SETLT: {
3041 // {subc.reg, subc.CA} = (subcarry %a, %b)
3042 // (zext (setcc %a, %b, setlt)) ->
3043 // (xor (adde (lshr %b, 63), (ashr %a, 63), subc.CA), 1)
3044 // (zext (setcc %a, 0, setlt)) -> (lshr %a, 63)
3045 if (IsRHSOne)
3046 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LEZExt);
3047 if (IsRHSZero)
3048 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, LHS,
3049 S->getI64Imm(1, dl),
3050 S->getI64Imm(63, dl)), 0);
3051 SDValue SRADINode =
3052 SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64,
3053 LHS, S->getI64Imm(63, dl)), 0);
3054 SDValue SRDINode =
3055 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64,
3056 RHS, S->getI64Imm(1, dl),
3057 S->getI64Imm(63, dl)), 0);
3058 SDValue SUBFC8Carry =
3059 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue,
3060 RHS, LHS), 1);
3061 SDValue ADDE8Node =
3062 SDValue(CurDAG->getMachineNode(PPC::ADDE8, dl, MVT::i64, MVT::Glue,
3063 SRDINode, SRADINode, SUBFC8Carry), 0);
3064 return SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64,
3065 ADDE8Node, S->getI64Imm(1, dl)), 0);
3067 case ISD::SETUGE:
3068 // {subc.reg, subc.CA} = (subcarry %a, %b)
3069 // (zext (setcc %a, %b, setuge)) -> (add (sube %b, %b, subc.CA), 1)
3070 std::swap(LHS, RHS);
3071 LLVM_FALLTHROUGH;
3072 case ISD::SETULE: {
3073 // {subc.reg, subc.CA} = (subcarry %b, %a)
3074 // (zext (setcc %a, %b, setule)) -> (add (sube %a, %a, subc.CA), 1)
3075 SDValue SUBFC8Carry =
3076 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue,
3077 LHS, RHS), 1);
3078 SDValue SUBFE8Node =
3079 SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, MVT::Glue,
3080 LHS, LHS, SUBFC8Carry), 0);
3081 return SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64,
3082 SUBFE8Node, S->getI64Imm(1, dl)), 0);
3084 case ISD::SETUGT:
3085 // {subc.reg, subc.CA} = (subcarry %b, %a)
3086 // (zext (setcc %a, %b, setugt)) -> -(sube %b, %b, subc.CA)
3087 std::swap(LHS, RHS);
3088 LLVM_FALLTHROUGH;
3089 case ISD::SETULT: {
3090 // {subc.reg, subc.CA} = (subcarry %a, %b)
3091 // (zext (setcc %a, %b, setult)) -> -(sube %a, %a, subc.CA)
3092 SDValue SubtractCarry =
3093 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue,
3094 RHS, LHS), 1);
3095 SDValue ExtSub =
3096 SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64,
3097 LHS, LHS, SubtractCarry), 0);
3098 return SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64,
3099 ExtSub), 0);
3100 }
3101 }
3102 }
3104 /// Produces a sign-extended result of comparing two 64-bit values according to
3105 /// the passed condition code.
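/// The returned value is all ones (-1) when the comparison is true and zero
/// otherwise, i.e. the i1 result sign-extended to 64 bits.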
3106 SDValue
3107 IntegerCompareEliminator::get64BitSExtCompare(SDValue LHS, SDValue RHS,
3108 ISD::CondCode CC,
3109 int64_t RHSValue, SDLoc dl) {
3110 if (CmpInGPR == ICGPR_I32 || CmpInGPR == ICGPR_SextI32 ||
3111 CmpInGPR == ICGPR_ZextI32 || CmpInGPR == ICGPR_Zext)
3112 return SDValue();
3113 bool IsRHSZero = RHSValue == 0;
3114 bool IsRHSOne = RHSValue == 1;
3115 bool IsRHSNegOne = RHSValue == -1LL;
3116 switch (CC) {
3117 default: return SDValue();
3118 case ISD::SETEQ: {
3119 // {addc.reg, addc.CA} = (addcarry (xor %a, %b), -1)
3120 // (sext (setcc %a, %b, seteq)) -> (sube addc.reg, addc.reg, addc.CA)
3121 // {addcz.reg, addcz.CA} = (addcarry %a, -1)
3122 // (sext (setcc %a, 0, seteq)) -> (sube addcz.reg, addcz.reg, addcz.CA)
3123 SDValue AddInput = IsRHSZero ? LHS :
3124 SDValue(CurDAG->getMachineNode(PPC::XOR8, dl, MVT::i64, LHS, RHS), 0);
3125 SDValue Addic =
3126 SDValue(CurDAG->getMachineNode(PPC::ADDIC8, dl, MVT::i64, MVT::Glue,
3127 AddInput, S->getI32Imm(~0U, dl)), 0);
3128 return SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, Addic,
3129 Addic, Addic.getValue(1)), 0);
3131 case ISD::SETNE: {
3132 // {subfc.reg, subfc.CA} = (subcarry 0, (xor %a, %b))
3133 // (sext (setcc %a, %b, setne)) -> (sube subfc.reg, subfc.reg, subfc.CA)
3134 // {subfcz.reg, subfcz.CA} = (subcarry 0, %a)
3135 // (sext (setcc %a, 0, setne)) -> (sube subfcz.reg, subfcz.reg, subfcz.CA)
3136 SDValue Xor = IsRHSZero ? LHS :
3137 SDValue(CurDAG->getMachineNode(PPC::XOR8, dl, MVT::i64, LHS, RHS), 0);
3138 SDValue SC =
3139 SDValue(CurDAG->getMachineNode(PPC::SUBFIC8, dl, MVT::i64, MVT::Glue,
3140 Xor, S->getI32Imm(0, dl)), 0);
3141 return SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, SC,
3142 SC, SC.getValue(1)), 0);
3144 case ISD::SETGE: {
3145 // {subc.reg, subc.CA} = (subcarry %a, %b)
3146 // (sext (setcc %a, %b, setge)) ->
3147 // (- (adde (lshr %b, 63), (ashr %a, 63), subc.CA))
3148 // (sext (setcc %a, 0, setge)) -> (~ (ashr %a, 63))
3149 if (IsRHSZero)
3150 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GESExt);
3151 std::swap(LHS, RHS);
3152 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
3153 IsRHSZero = RHSConst && RHSConst->isNullValue();
3154 LLVM_FALLTHROUGH;
3156 case ISD::SETLE: {
3157 // {subc.reg, subc.CA} = (subcarry %b, %a)
3158 // (sext (setcc %a, %b, setle)) ->
3159 // (- (adde (lshr %a, 63), (ashr %b, 63), subc.CA))
3160 // (sext (setcc %a, 0, setle)) -> (ashr (or %a, (add %a, -1)), 63)
3161 if (IsRHSZero)
3162 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LESExt);
3163 SDValue ShiftR =
3164 SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, RHS,
3165 S->getI64Imm(63, dl)), 0);
3166 SDValue ShiftL =
3167 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, LHS,
3168 S->getI64Imm(1, dl),
3169 S->getI64Imm(63, dl)), 0);
3170 SDValue SubtractCarry =
3171 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue,
3172 LHS, RHS), 1);
3173 SDValue Adde =
3174 SDValue(CurDAG->getMachineNode(PPC::ADDE8, dl, MVT::i64, MVT::Glue,
3175 ShiftR, ShiftL, SubtractCarry), 0);
3176 return SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, Adde), 0);
3178 case ISD::SETGT: {
3179 // {subc.reg, subc.CA} = (subcarry %b, %a)
3180 // (sext (setcc %a, %b, setgt)) ->
3181 // -(xor (adde (lshr %a, 63), (ashr %b, 63), subc.CA), 1)
3182 // (sext (setcc %a, 0, setgt)) -> (ashr (nor (add %a, -1), %a), 63)
3183 if (IsRHSNegOne)
3184 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GESExt);
3185 if (IsRHSZero) {
3186 SDValue Add =
3187 SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, LHS,
3188 S->getI64Imm(-1, dl)), 0);
3189 SDValue Nor =
3190 SDValue(CurDAG->getMachineNode(PPC::NOR8, dl, MVT::i64, Add, LHS), 0);
3191 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, Nor,
3192 S->getI64Imm(63, dl)), 0);
3194 std::swap(LHS, RHS);
3195 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
3196 IsRHSZero = RHSConst && RHSConst->isNullValue();
3197 IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1;
3198 LLVM_FALLTHROUGH;
3200 case ISD::SETLT: {
3201 // {subc.reg, subc.CA} = (subcarry %a, %b)
3202 // (sext (setcc %a, %b, setlt)) ->
3203 // -(xor (adde (lshr %b, 63), (ashr %a, 63), subc.CA), 1)
3204 // (sext (setcc %a, 0, setlt)) -> (ashr %a, 63)
3205 if (IsRHSOne)
3206 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LESExt);
3207 if (IsRHSZero) {
3208 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, LHS,
3209 S->getI64Imm(63, dl)), 0);
3211 SDValue SRADINode =
3212 SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64,
3213 LHS, S->getI64Imm(63, dl)), 0);
3214 SDValue SRDINode =
3215 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64,
3216 RHS, S->getI64Imm(1, dl),
3217 S->getI64Imm(63, dl)), 0);
3218 SDValue SUBFC8Carry =
3219 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue,
3220 RHS, LHS), 1);
3221 SDValue ADDE8Node =
3222 SDValue(CurDAG->getMachineNode(PPC::ADDE8, dl, MVT::i64,
3223 SRDINode, SRADINode, SUBFC8Carry), 0);
3224 SDValue XORI8Node =
3225 SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64,
3226 ADDE8Node, S->getI64Imm(1, dl)), 0);
3227 return SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64,
3228 XORI8Node), 0);
3230 case ISD::SETUGE:
3231 // {subc.reg, subc.CA} = (subcarry %a, %b)
3232 // (sext (setcc %a, %b, setuge)) -> ~(sube %b, %b, subc.CA)
3233 std::swap(LHS, RHS);
3234 LLVM_FALLTHROUGH;
3235 case ISD::SETULE: {
3236 // {subc.reg, subc.CA} = (subcarry %b, %a)
3237 // (sext (setcc %a, %b, setule)) -> ~(sube %a, %a, subc.CA)
3238 SDValue SubtractCarry =
3239 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue,
3240 LHS, RHS), 1);
3241 SDValue ExtSub =
3242 SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, MVT::Glue, LHS,
3243 LHS, SubtractCarry), 0);
3244 return SDValue(CurDAG->getMachineNode(PPC::NOR8, dl, MVT::i64,
3245 ExtSub, ExtSub), 0);
3247 case ISD::SETUGT:
3248 // {subc.reg, subc.CA} = (subcarry %b, %a)
3249 // (sext (setcc %a, %b, setugt)) -> (sube %b, %b, subc.CA)
3250 std::swap(LHS, RHS);
3251 LLVM_FALLTHROUGH;
3252 case ISD::SETULT: {
3253 // {subc.reg, subc.CA} = (subcarry %a, %b)
3254 // (sext (setcc %a, %b, setult)) -> (sube %a, %a, subc.CA)
3255 SDValue SubCarry =
3256 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue,
3257 RHS, LHS), 1);
3258 return SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64,
3259 LHS, LHS, SubCarry), 0);
3260 }
3261 }
3262 }
3264 /// Do all uses of this SDValue need the result in a GPR?
3265 /// This is meant to be used on values that have type i1 since
3266 /// it is somewhat meaningless to ask if values of other types
3267 /// should be kept in GPR's.
3268 static bool allUsesExtend(SDValue Compare, SelectionDAG *CurDAG) {
3269 assert(Compare.getOpcode() == ISD::SETCC &&
3270 "An ISD::SETCC node required here.");
3272 // For values that have a single use, the caller should obviously already have
3273 // checked if that use is an extending use. We check the other uses here.
3274 if (Compare.hasOneUse())
3275 return true;
3276 // We want the value in a GPR if it is being extended, used for a select, or
3277 // used in logical operations.
3278 for (auto CompareUse : Compare.getNode()->uses())
3279 if (CompareUse->getOpcode() != ISD::SIGN_EXTEND &&
3280 CompareUse->getOpcode() != ISD::ZERO_EXTEND &&
3281 CompareUse->getOpcode() != ISD::SELECT &&
3282 !isLogicOp(CompareUse->getOpcode())) {
3283 OmittedForNonExtendUses++;
3284 return false;
3285 }
3286 return true;
3287 }
3289 /// Returns an equivalent of a SETCC node but with the result the same width as
3290 /// the inputs. This can also be used for SELECT_CC if either the true or false
3291 /// value is a power of two while the other is zero.
3292 SDValue IntegerCompareEliminator::getSETCCInGPR(SDValue Compare,
3293 SetccInGPROpts ConvOpts) {
3294 assert((Compare.getOpcode() == ISD::SETCC ||
3295 Compare.getOpcode() == ISD::SELECT_CC) &&
3296 "An ISD::SETCC node required here.");
3298 // Don't convert this comparison to a GPR sequence because there are uses
3299 // of the i1 result (i.e. uses that require the result in the CR).
3300 if ((Compare.getOpcode() == ISD::SETCC) && !allUsesExtend(Compare, CurDAG))
3301 return SDValue();
3303 SDValue LHS = Compare.getOperand(0);
3304 SDValue RHS = Compare.getOperand(1);
3306 // The condition code is operand 2 for SETCC and operand 4 for SELECT_CC.
3307 int CCOpNum = Compare.getOpcode() == ISD::SELECT_CC ? 4 : 2;
3308 ISD::CondCode CC =
3309 cast<CondCodeSDNode>(Compare.getOperand(CCOpNum))->get();
3310 EVT InputVT = LHS.getValueType();
3311 if (InputVT != MVT::i32 && InputVT != MVT::i64)
3312 return SDValue();
3314 if (ConvOpts == SetccInGPROpts::ZExtInvert ||
3315 ConvOpts == SetccInGPROpts::SExtInvert)
3316 CC = ISD::getSetCCInverse(CC, true);
3318 bool Inputs32Bit = InputVT == MVT::i32;
3320 SDLoc dl(Compare);
3321 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
3322 int64_t RHSValue = RHSConst ? RHSConst->getSExtValue() : INT64_MAX;
3323 bool IsSext = ConvOpts == SetccInGPROpts::SExtOrig ||
3324 ConvOpts == SetccInGPROpts::SExtInvert;
3326 if (IsSext && Inputs32Bit)
3327 return get32BitSExtCompare(LHS, RHS, CC, RHSValue, dl);
3328 else if (Inputs32Bit)
3329 return get32BitZExtCompare(LHS, RHS, CC, RHSValue, dl);
3330 else if (IsSext)
3331 return get64BitSExtCompare(LHS, RHS, CC, RHSValue, dl);
3332 return get64BitZExtCompare(LHS, RHS, CC, RHSValue, dl);
3333 }
3335 } // end anonymous namespace
3337 bool PPCDAGToDAGISel::tryIntCompareInGPR(SDNode *N) {
3338 if (N->getValueType(0) != MVT::i32 &&
3339 N->getValueType(0) != MVT::i64)
3340 return false;
3342 // This optimization will emit code that assumes 64-bit registers
3343 // so we don't want to run it in 32-bit mode. Also don't run it
3344 // on functions that are not to be optimized.
3345 if (TM.getOptLevel() == CodeGenOpt::None || !TM.isPPC64())
3346 return false;
3348 switch (N->getOpcode()) {
3349 default: break;
3350 case ISD::ZERO_EXTEND:
3351 case ISD::SIGN_EXTEND:
3352 case ISD::AND:
3353 case ISD::OR:
3354 case ISD::XOR: {
3355 IntegerCompareEliminator ICmpElim(CurDAG, this);
3356 if (SDNode *New = ICmpElim.Select(N)) {
3357 ReplaceNode(N, New);
3358 return true;
3359 }
3360 }
3361 }
3362 return false;
3363 }
3365 bool PPCDAGToDAGISel::tryBitPermutation(SDNode *N) {
3366 if (N->getValueType(0) != MVT::i32 &&
3367 N->getValueType(0) != MVT::i64)
3368 return false;
3370 if (!UseBitPermRewriter)
3371 return false;
3373 switch (N->getOpcode()) {
3374 default: break;
3375 case ISD::ROTL:
3376 case ISD::SHL:
3377 case ISD::SRL:
3378 case ISD::AND:
3379 case ISD::OR: {
3380 BitPermutationSelector BPS(CurDAG);
3381 if (SDNode *New = BPS.Select(N)) {
3382 ReplaceNode(N, New);
3383 return true;
3384 }
3385 return false;
3386 }
3387 }
3389 return false;
3390 }
3392 /// SelectCC - Select a comparison of the specified values with the specified
3393 /// condition code, returning the CR# of the expression.
3394 SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
3395 const SDLoc &dl) {
3396 // Always select the LHS.
3397 unsigned Opc;
3399 if (LHS.getValueType() == MVT::i32) {
3400 unsigned Imm;
3401 if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3402 if (isInt32Immediate(RHS, Imm)) {
3403 // SETEQ/SETNE comparison with 16-bit immediate, fold it.
3404 if (isUInt<16>(Imm))
3405 return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, LHS,
3406 getI32Imm(Imm & 0xFFFF, dl)),
3407 0);
3408 // If this is a 16-bit signed immediate, fold it.
3409 if (isInt<16>((int)Imm))
3410 return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS,
3411 getI32Imm(Imm & 0xFFFF, dl)),
3412 0);
3414 // For non-equality comparisons, the default code would materialize the
3415 // constant, then compare against it, like this:
3416 // lis r2, 4660
3417 // ori r2, r2, 22136
3418 // cmpw cr0, r3, r2
3419 // Since we are just comparing for equality, we can emit this instead:
3420 // xoris r0,r3,0x1234
3421 // cmplwi cr0,r0,0x5678
3422 // beq cr0,L6
3423 SDValue Xor(CurDAG->getMachineNode(PPC::XORIS, dl, MVT::i32, LHS,
3424 getI32Imm(Imm >> 16, dl)), 0);
3425 return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, Xor,
3426 getI32Imm(Imm & 0xFFFF, dl)), 0);
3427 }
3428 Opc = PPC::CMPLW;
3429 } else if (ISD::isUnsignedIntSetCC(CC)) {
3430 if (isInt32Immediate(RHS, Imm) && isUInt<16>(Imm))
3431 return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, LHS,
3432 getI32Imm(Imm & 0xFFFF, dl)), 0);
3433 Opc = PPC::CMPLW;
3434 } else {
3435 int16_t SImm;
3436 if (isIntS16Immediate(RHS, SImm))
3437 return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS,
3438 getI32Imm((int)SImm & 0xFFFF,
3439 dl)),
3440 0);
3441 Opc = PPC::CMPW;
3442 }
3443 } else if (LHS.getValueType() == MVT::i64) {
3444 uint64_t Imm;
3445 if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3446 if (isInt64Immediate(RHS.getNode(), Imm)) {
3447 // SETEQ/SETNE comparison with 16-bit immediate, fold it.
3448 if (isUInt<16>(Imm))
3449 return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, LHS,
3450 getI32Imm(Imm & 0xFFFF, dl)),
3451 0);
3452 // If this is a 16-bit signed immediate, fold it.
3453 if (isInt<16>(Imm))
3454 return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS,
3455 getI32Imm(Imm & 0xFFFF, dl)),
3456 0);
3458 // For non-equality comparisons, the default code would materialize the
3459 // constant, then compare against it, like this:
3460 // lis r2, 4660
3461 // ori r2, r2, 22136
3462 // cmpd cr0, r3, r2
3463 // Since we are just comparing for equality, we can emit this instead:
3464 // xoris r0,r3,0x1234
3465 // cmpldi cr0,r0,0x5678
3466 // beq cr0,L6
3467 if (isUInt<32>(Imm)) {
3468 SDValue Xor(CurDAG->getMachineNode(PPC::XORIS8, dl, MVT::i64, LHS,
3469 getI64Imm(Imm >> 16, dl)), 0);
3470 return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, Xor,
3471 getI64Imm(Imm & 0xFFFF, dl)),
3472 0);
3473 }
3474 }
3475 Opc = PPC::CMPLD;
3476 } else if (ISD::isUnsignedIntSetCC(CC)) {
3477 if (isInt64Immediate(RHS.getNode(), Imm) && isUInt<16>(Imm))
3478 return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, LHS,
3479 getI64Imm(Imm & 0xFFFF, dl)), 0);
3480 Opc = PPC::CMPLD;
3481 } else {
3482 int16_t SImm;
3483 if (isIntS16Immediate(RHS, SImm))
3484 return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS,
3485 getI64Imm(SImm & 0xFFFF, dl)),
3486 0);
3487 Opc = PPC::CMPD;
3488 }
3489 } else if (LHS.getValueType() == MVT::f32) {
3490 Opc = PPC::FCMPUS;
3491 } else {
3492 assert(LHS.getValueType() == MVT::f64 && "Unknown vt!");
3493 Opc = PPCSubTarget->hasVSX() ? PPC::XSCMPUDP : PPC::FCMPUD;
3494 }
3495 return SDValue(CurDAG->getMachineNode(Opc, dl, MVT::i32, LHS, RHS), 0);
3496 }
3498 static PPC::Predicate getPredicateForSetCC(ISD::CondCode CC) {
3499 switch (CC) {
3500 case ISD::SETUEQ:
3501 case ISD::SETONE:
3502 case ISD::SETOLE:
3503 case ISD::SETOGE:
3504 llvm_unreachable("Should be lowered by legalize!");
3505 default: llvm_unreachable("Unknown condition!");
3506 case ISD::SETOEQ:
3507 case ISD::SETEQ: return PPC::PRED_EQ;
3508 case ISD::SETUNE:
3509 case ISD::SETNE: return PPC::PRED_NE;
3510 case ISD::SETOLT:
3511 case ISD::SETLT: return PPC::PRED_LT;
3512 case ISD::SETULE:
3513 case ISD::SETLE: return PPC::PRED_LE;
3514 case ISD::SETOGT:
3515 case ISD::SETGT: return PPC::PRED_GT;
3516 case ISD::SETUGE:
3517 case ISD::SETGE: return PPC::PRED_GE;
3518 case ISD::SETO: return PPC::PRED_NU;
3519 case ISD::SETUO: return PPC::PRED_UN;
3520 // These two are invalid for floating point. Assume we have int.
3521 case ISD::SETULT: return PPC::PRED_LT;
3522 case ISD::SETUGT: return PPC::PRED_GT;
3523 }
3524 }
3526 /// getCRIdxForSetCC - Return the index of the condition register field
3527 /// associated with the SetCC condition, and whether or not the field is
3528 /// treated as inverted. That is, lt = 0; ge = 0 inverted.
3529 static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert) {
3530 Invert = false;
3531 switch (CC) {
3532 default: llvm_unreachable("Unknown condition!");
3533 case ISD::SETOLT:
3534 case ISD::SETLT: return 0; // Bit #0 = SETOLT
3535 case ISD::SETOGT:
3536 case ISD::SETGT: return 1; // Bit #1 = SETOGT
3537 case ISD::SETOEQ:
3538 case ISD::SETEQ: return 2; // Bit #2 = SETOEQ
3539 case ISD::SETUO: return 3; // Bit #3 = SETUO
3540 case ISD::SETUGE:
3541 case ISD::SETGE: Invert = true; return 0; // !Bit #0 = SETUGE
3542 case ISD::SETULE:
3543 case ISD::SETLE: Invert = true; return 1; // !Bit #1 = SETULE
3544 case ISD::SETUNE:
3545 case ISD::SETNE: Invert = true; return 2; // !Bit #2 = SETUNE
3546 case ISD::SETO: Invert = true; return 3; // !Bit #3 = SETO
3547 case ISD::SETUEQ:
3548 case ISD::SETOGE:
3549 case ISD::SETOLE:
3550 case ISD::SETONE:
3551 llvm_unreachable("Invalid branch code: should be expanded by legalize");
3552 // These are invalid for floating point. Assume integer.
3553 case ISD::SETULT: return 0;
3554 case ISD::SETUGT: return 1;
3555 }
3556 }
3558 // getVCmpInst: return the vector compare instruction for the specified
3559 // vector type and condition code. Since this is for Altivec-specific code,
3560 // we only support the Altivec types (v16i8, v8i16, v4i32, v2i64, and v4f32).
3561 static unsigned int getVCmpInst(MVT VecVT, ISD::CondCode CC,
3562 bool HasVSX, bool &Swap, bool &Negate) {
3563 Swap = false;
3564 Negate = false;
3566 if (VecVT.isFloatingPoint()) {
3567 /* Handle some cases by swapping input operands. */
3568 switch (CC) {
3569 case ISD::SETLE: CC = ISD::SETGE; Swap = true; break;
3570 case ISD::SETLT: CC = ISD::SETGT; Swap = true; break;
3571 case ISD::SETOLE: CC = ISD::SETOGE; Swap = true; break;
3572 case ISD::SETOLT: CC = ISD::SETOGT; Swap = true; break;
3573 case ISD::SETUGE: CC = ISD::SETULE; Swap = true; break;
3574 case ISD::SETUGT: CC = ISD::SETULT; Swap = true; break;
3575 default: break;
3577 /* Handle some cases by negating the result. */
3578 switch (CC) {
3579 case ISD::SETNE: CC = ISD::SETEQ; Negate = true; break;
3580 case ISD::SETUNE: CC = ISD::SETOEQ; Negate = true; break;
3581 case ISD::SETULE: CC = ISD::SETOGT; Negate = true; break;
3582 case ISD::SETULT: CC = ISD::SETOGE; Negate = true; break;
3583 default: break;
3585 /* We have instructions implementing the remaining cases. */
3586 switch (CC) {
3587 case ISD::SETEQ:
3588 case ISD::SETOEQ:
3589 if (VecVT == MVT::v4f32)
3590 return HasVSX ? PPC::XVCMPEQSP : PPC::VCMPEQFP;
3591 else if (VecVT == MVT::v2f64)
3592 return PPC::XVCMPEQDP;
3593 break;
3594 case ISD::SETGT:
3595 case ISD::SETOGT:
3596 if (VecVT == MVT::v4f32)
3597 return HasVSX ? PPC::XVCMPGTSP : PPC::VCMPGTFP;
3598 else if (VecVT == MVT::v2f64)
3599 return PPC::XVCMPGTDP;
3600 break;
3601 case ISD::SETGE:
3602 case ISD::SETOGE:
3603 if (VecVT == MVT::v4f32)
3604 return HasVSX ? PPC::XVCMPGESP : PPC::VCMPGEFP;
3605 else if (VecVT == MVT::v2f64)
3606 return PPC::XVCMPGEDP;
3607 break;
3608 default:
3609 break;
3611 llvm_unreachable("Invalid floating-point vector compare condition");
3612 } else {
3613 /* Handle some cases by swapping input operands. */
3614 switch (CC) {
3615 case ISD::SETGE: CC = ISD::SETLE; Swap = true; break;
3616 case ISD::SETLT: CC = ISD::SETGT; Swap = true; break;
3617 case ISD::SETUGE: CC = ISD::SETULE; Swap = true; break;
3618 case ISD::SETULT: CC = ISD::SETUGT; Swap = true; break;
3619 default: break;
3621 /* Handle some cases by negating the result. */
3622 switch (CC) {
3623 case ISD::SETNE: CC = ISD::SETEQ; Negate = true; break;
3624 case ISD::SETUNE: CC = ISD::SETUEQ; Negate = true; break;
3625 case ISD::SETLE: CC = ISD::SETGT; Negate = true; break;
3626 case ISD::SETULE: CC = ISD::SETUGT; Negate = true; break;
3627 default: break;
3629 /* We have instructions implementing the remaining cases. */
3630 switch (CC) {
3631 case ISD::SETEQ:
3632 case ISD::SETUEQ:
3633 if (VecVT == MVT::v16i8)
3634 return PPC::VCMPEQUB;
3635 else if (VecVT == MVT::v8i16)
3636 return PPC::VCMPEQUH;
3637 else if (VecVT == MVT::v4i32)
3638 return PPC::VCMPEQUW;
3639 else if (VecVT == MVT::v2i64)
3640 return PPC::VCMPEQUD;
3641 break;
3642 case ISD::SETGT:
3643 if (VecVT == MVT::v16i8)
3644 return PPC::VCMPGTSB;
3645 else if (VecVT == MVT::v8i16)
3646 return PPC::VCMPGTSH;
3647 else if (VecVT == MVT::v4i32)
3648 return PPC::VCMPGTSW;
3649 else if (VecVT == MVT::v2i64)
3650 return PPC::VCMPGTSD;
3651 break;
3652 case ISD::SETUGT:
3653 if (VecVT == MVT::v16i8)
3654 return PPC::VCMPGTUB;
3655 else if (VecVT == MVT::v8i16)
3656 return PPC::VCMPGTUH;
3657 else if (VecVT == MVT::v4i32)
3658 return PPC::VCMPGTUW;
3659 else if (VecVT == MVT::v2i64)
3660 return PPC::VCMPGTUD;
3661 break;
3662 default:
3663 break;
3665 llvm_unreachable("Invalid integer vector compare condition");
3669 bool PPCDAGToDAGISel::trySETCC(SDNode *N) {
3670 SDLoc dl(N);
3671 unsigned Imm;
3672 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
3673 EVT PtrVT =
3674 CurDAG->getTargetLoweringInfo().getPointerTy(CurDAG->getDataLayout());
3675 bool isPPC64 = (PtrVT == MVT::i64);
3677 if (!PPCSubTarget->useCRBits() &&
3678 isInt32Immediate(N->getOperand(1), Imm)) {
3679 // We can codegen setcc op, imm very efficiently compared to a brcond.
3680 // Check for those cases here.
3681 // setcc op, 0
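// For instance, the SETEQ case below computes (Op == 0) as (cntlzw Op) >> 5,
// relying on cntlzw returning 32 (the only possible count with bit 5 set)
// exactly when Op is zero.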
3682 if (Imm == 0) {
3683 SDValue Op = N->getOperand(0);
3684 switch (CC) {
3685 default: break;
3686 case ISD::SETEQ: {
3687 Op = SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Op), 0);
3688 SDValue Ops[] = { Op, getI32Imm(27, dl), getI32Imm(5, dl),
3689 getI32Imm(31, dl) };
3690 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
3691 return true;
3693 case ISD::SETNE: {
3694 if (isPPC64) break;
3695 SDValue AD =
3696 SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
3697 Op, getI32Imm(~0U, dl)), 0);
3698 CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op, AD.getValue(1));
3699 return true;
3701 case ISD::SETLT: {
3702 SDValue Ops[] = { Op, getI32Imm(1, dl), getI32Imm(31, dl),
3703 getI32Imm(31, dl) };
3704 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
3705 return true;
3707 case ISD::SETGT: {
3708 SDValue T =
3709 SDValue(CurDAG->getMachineNode(PPC::NEG, dl, MVT::i32, Op), 0);
3710 T = SDValue(CurDAG->getMachineNode(PPC::ANDC, dl, MVT::i32, T, Op), 0);
3711 SDValue Ops[] = { T, getI32Imm(1, dl), getI32Imm(31, dl),
3712 getI32Imm(31, dl) };
3713 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
3714 return true;
3717 } else if (Imm == ~0U) { // setcc op, -1
3718 SDValue Op = N->getOperand(0);
3719 switch (CC) {
3720 default: break;
3721 case ISD::SETEQ:
3722 if (isPPC64) break;
3723 Op = SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
3724 Op, getI32Imm(1, dl)), 0);
3725 CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
3726 SDValue(CurDAG->getMachineNode(PPC::LI, dl,
3727 MVT::i32,
3728 getI32Imm(0, dl)),
3729 0), Op.getValue(1));
3730 return true;
3731 case ISD::SETNE: {
3732 if (isPPC64) break;
3733 Op = SDValue(CurDAG->getMachineNode(PPC::NOR, dl, MVT::i32, Op, Op), 0);
3734 SDNode *AD = CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
3735 Op, getI32Imm(~0U, dl));
3736 CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(AD, 0), Op,
3737 SDValue(AD, 1));
3738 return true;
3740 case ISD::SETLT: {
3741 SDValue AD = SDValue(CurDAG->getMachineNode(PPC::ADDI, dl, MVT::i32, Op,
3742 getI32Imm(1, dl)), 0);
3743 SDValue AN = SDValue(CurDAG->getMachineNode(PPC::AND, dl, MVT::i32, AD,
3744 Op), 0);
3745 SDValue Ops[] = { AN, getI32Imm(1, dl), getI32Imm(31, dl),
3746 getI32Imm(31, dl) };
3747 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
3748 return true;
3750 case ISD::SETGT: {
3751 SDValue Ops[] = { Op, getI32Imm(1, dl), getI32Imm(31, dl),
3752 getI32Imm(31, dl) };
3753 Op = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
3754 CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op, getI32Imm(1, dl));
3755 return true;
3761 SDValue LHS = N->getOperand(0);
3762 SDValue RHS = N->getOperand(1);
3764 // Altivec Vector compare instructions do not set any CR register by default and
3765 // vector compare operations return the same type as the operands.
3766 if (LHS.getValueType().isVector()) {
3767 if (PPCSubTarget->hasQPX())
3768 return false;
3770 EVT VecVT = LHS.getValueType();
3771 bool Swap, Negate;
3772 unsigned int VCmpInst = getVCmpInst(VecVT.getSimpleVT(), CC,
3773 PPCSubTarget->hasVSX(), Swap, Negate);
3774 if (Swap)
3775 std::swap(LHS, RHS);
3777 EVT ResVT = VecVT.changeVectorElementTypeToInteger();
3778 if (Negate) {
3779 SDValue VCmp(CurDAG->getMachineNode(VCmpInst, dl, ResVT, LHS, RHS), 0);
3780 CurDAG->SelectNodeTo(N, PPCSubTarget->hasVSX() ? PPC::XXLNOR : PPC::VNOR,
3781 ResVT, VCmp, VCmp);
3782 return true;
3785 CurDAG->SelectNodeTo(N, VCmpInst, ResVT, LHS, RHS);
3786 return true;
3789 if (PPCSubTarget->useCRBits())
3790 return false;
3792 bool Inv;
3793 unsigned Idx = getCRIdxForSetCC(CC, Inv);
3794 SDValue CCReg = SelectCC(LHS, RHS, CC, dl);
3795 SDValue IntCR;
3797 // Force the ccreg into CR7.
3798 SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, MVT::i32);
3800 SDValue InFlag(nullptr, 0); // Null incoming flag value.
3801 CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg,
3802 InFlag).getValue(1);
3804 IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg,
3805 CCReg), 0);
3807 SDValue Ops[] = { IntCR, getI32Imm((32 - (3 - Idx)) & 31, dl),
3808 getI32Imm(31, dl), getI32Imm(31, dl) };
3809 if (!Inv) {
3810 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
3811 return true;
3814 // Get the specified bit.
3815 SDValue Tmp =
3816 SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
3817 CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1, dl));
3818 return true;
3819 }
3821 /// Does this node represent a load/store node whose address can be represented
3822 /// with a register plus an immediate that's a multiple of \p Val?
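/// For example, with \p Val == 4 an address of the form (add %base, 8)
/// qualifies, while (add %base, 6) does not.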
3823 bool PPCDAGToDAGISel::isOffsetMultipleOf(SDNode *N, unsigned Val) const {
3824 LoadSDNode *LDN = dyn_cast<LoadSDNode>(N);
3825 StoreSDNode *STN = dyn_cast<StoreSDNode>(N);
3826 SDValue AddrOp;
3827 if (LDN)
3828 AddrOp = LDN->getOperand(1);
3829 else if (STN)
3830 AddrOp = STN->getOperand(2);
3832 short Imm = 0;
3833 if (AddrOp.getOpcode() == ISD::ADD) {
3834 // If op0 is a frame index that is under-aligned, we can't do it either,
3835 // because it is translated to r31 or r1 + slot + offset. We won't know the
3836 // slot number until the stack frame is finalized.
3837 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddrOp.getOperand(0))) {
3838 const MachineFrameInfo &MFI = CurDAG->getMachineFunction().getFrameInfo();
3839 unsigned SlotAlign = MFI.getObjectAlignment(FI->getIndex());
3840 if ((SlotAlign % Val) != 0)
3841 return false;
3843 return isIntS16Immediate(AddrOp.getOperand(1), Imm) && !(Imm % Val);
3846 // If the address comes from the outside, the offset will be zero.
3847 return AddrOp.getOpcode() == ISD::CopyFromReg;
3848 }
3850 void PPCDAGToDAGISel::transferMemOperands(SDNode *N, SDNode *Result) {
3851 // Transfer memoperands.
3852 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3853 MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
3854 cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
3857 // Select - Convert the specified operand from a target-independent to a
3858 // target-specific node if it hasn't already been changed.
3859 void PPCDAGToDAGISel::Select(SDNode *N) {
3860 SDLoc dl(N);
3861 if (N->isMachineOpcode()) {
3862 N->setNodeId(-1);
3863 return; // Already selected.
3866 // In case any misguided DAG-level optimizations form an ADD with a
3867 // TargetConstant operand, crash here instead of miscompiling (by selecting
3868 // an r+r add instead of some kind of r+i add).
3869 if (N->getOpcode() == ISD::ADD &&
3870 N->getOperand(1).getOpcode() == ISD::TargetConstant)
3871 llvm_unreachable("Invalid ADD with TargetConstant operand");
3873 // Try matching complex bit permutations before doing anything else.
3874 if (tryBitPermutation(N))
3875 return;
3877 // Try to emit integer compares as GPR-only sequences (i.e. no use of CR).
3878 if (tryIntCompareInGPR(N))
3879 return;
3881 switch (N->getOpcode()) {
3882 default: break;
3884 case ISD::Constant:
3885 if (N->getValueType(0) == MVT::i64) {
3886 ReplaceNode(N, selectI64Imm(CurDAG, N));
3887 return;
3889 break;
3891 case ISD::SETCC:
3892 if (trySETCC(N))
3893 return;
3894 break;
3896 case PPCISD::GlobalBaseReg:
3897 ReplaceNode(N, getGlobalBaseReg());
3898 return;
3900 case ISD::FrameIndex:
3901 selectFrameIndex(N, N);
3902 return;
3904 case PPCISD::MFOCRF: {
3905 SDValue InFlag = N->getOperand(1);
3906 ReplaceNode(N, CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32,
3907 N->getOperand(0), InFlag));
3908 return;
3911 case PPCISD::READ_TIME_BASE:
3912 ReplaceNode(N, CurDAG->getMachineNode(PPC::ReadTB, dl, MVT::i32, MVT::i32,
3913 MVT::Other, N->getOperand(0)));
3914 return;
3916 case PPCISD::SRA_ADDZE: {
3917 SDValue N0 = N->getOperand(0);
3918 SDValue ShiftAmt =
3919 CurDAG->getTargetConstant(*cast<ConstantSDNode>(N->getOperand(1))->
3920 getConstantIntValue(), dl,
3921 N->getValueType(0));
3922 if (N->getValueType(0) == MVT::i64) {
3923 SDNode *Op =
3924 CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, MVT::Glue,
3925 N0, ShiftAmt);
3926 CurDAG->SelectNodeTo(N, PPC::ADDZE8, MVT::i64, SDValue(Op, 0),
3927 SDValue(Op, 1));
3928 return;
3929 } else {
3930 assert(N->getValueType(0) == MVT::i32 &&
3931 "Expecting i64 or i32 in PPCISD::SRA_ADDZE");
3932 SDNode *Op =
3933 CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, MVT::Glue,
3934 N0, ShiftAmt);
3935 CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32, SDValue(Op, 0),
3936 SDValue(Op, 1));
3937 return;
3941 case ISD::LOAD: {
3942 // Handle preincrement loads.
3943 LoadSDNode *LD = cast<LoadSDNode>(N);
3944 EVT LoadedVT = LD->getMemoryVT();
3946 // Normal loads are handled by code generated from the .td file.
3947 if (LD->getAddressingMode() != ISD::PRE_INC)
3948 break;
3950 SDValue Offset = LD->getOffset();
3951 if (Offset.getOpcode() == ISD::TargetConstant ||
3952 Offset.getOpcode() == ISD::TargetGlobalAddress) {
3954 unsigned Opcode;
3955 bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD;
3956 if (LD->getValueType(0) != MVT::i64) {
3957 // Handle PPC32 integer and normal FP loads.
3958 assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load");
3959 switch (LoadedVT.getSimpleVT().SimpleTy) {
3960 default: llvm_unreachable("Invalid PPC load type!");
3961 case MVT::f64: Opcode = PPC::LFDU; break;
3962 case MVT::f32: Opcode = PPC::LFSU; break;
3963 case MVT::i32: Opcode = PPC::LWZU; break;
3964 case MVT::i16: Opcode = isSExt ? PPC::LHAU : PPC::LHZU; break;
3965 case MVT::i1:
3966 case MVT::i8: Opcode = PPC::LBZU; break;
3968 } else {
3969 assert(LD->getValueType(0) == MVT::i64 && "Unknown load result type!");
3970 assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load");
3971 switch (LoadedVT.getSimpleVT().SimpleTy) {
3972 default: llvm_unreachable("Invalid PPC load type!");
3973 case MVT::i64: Opcode = PPC::LDU; break;
3974 case MVT::i32: Opcode = PPC::LWZU8; break;
3975 case MVT::i16: Opcode = isSExt ? PPC::LHAU8 : PPC::LHZU8; break;
3976 case MVT::i1:
3977 case MVT::i8: Opcode = PPC::LBZU8; break;
3981 SDValue Chain = LD->getChain();
3982 SDValue Base = LD->getBasePtr();
3983 SDValue Ops[] = { Offset, Base, Chain };
3984 SDNode *MN = CurDAG->getMachineNode(
3985 Opcode, dl, LD->getValueType(0),
3986 PPCLowering->getPointerTy(CurDAG->getDataLayout()), MVT::Other, Ops);
3987 transferMemOperands(N, MN);
3988 ReplaceNode(N, MN);
3989 return;
3990 } else {
3991 unsigned Opcode;
3992 bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD;
3993 if (LD->getValueType(0) != MVT::i64) {
3994 // Handle PPC32 integer and normal FP loads.
3995 assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load");
3996 switch (LoadedVT.getSimpleVT().SimpleTy) {
3997 default: llvm_unreachable("Invalid PPC load type!");
3998 case MVT::v4f64: Opcode = PPC::QVLFDUX; break; // QPX
3999 case MVT::v4f32: Opcode = PPC::QVLFSUX; break; // QPX
4000 case MVT::f64: Opcode = PPC::LFDUX; break;
4001 case MVT::f32: Opcode = PPC::LFSUX; break;
4002 case MVT::i32: Opcode = PPC::LWZUX; break;
4003 case MVT::i16: Opcode = isSExt ? PPC::LHAUX : PPC::LHZUX; break;
4004 case MVT::i1:
4005 case MVT::i8: Opcode = PPC::LBZUX; break;
4007 } else {
4008 assert(LD->getValueType(0) == MVT::i64 && "Unknown load result type!");
4009 assert((!isSExt || LoadedVT == MVT::i16 || LoadedVT == MVT::i32) &&
4010 "Invalid sext update load");
4011 switch (LoadedVT.getSimpleVT().SimpleTy) {
4012 default: llvm_unreachable("Invalid PPC load type!");
4013 case MVT::i64: Opcode = PPC::LDUX; break;
4014 case MVT::i32: Opcode = isSExt ? PPC::LWAUX : PPC::LWZUX8; break;
4015 case MVT::i16: Opcode = isSExt ? PPC::LHAUX8 : PPC::LHZUX8; break;
4016 case MVT::i1:
4017 case MVT::i8: Opcode = PPC::LBZUX8; break;
4021 SDValue Chain = LD->getChain();
4022 SDValue Base = LD->getBasePtr();
4023 SDValue Ops[] = { Base, Offset, Chain };
4024 SDNode *MN = CurDAG->getMachineNode(
4025 Opcode, dl, LD->getValueType(0),
4026 PPCLowering->getPointerTy(CurDAG->getDataLayout()), MVT::Other, Ops);
4027 transferMemOperands(N, MN);
4028 ReplaceNode(N, MN);
4029 return;
4033 case ISD::AND: {
4034 unsigned Imm, Imm2, SH, MB, ME;
4035 uint64_t Imm64;
4037 // If this is an and of a value rotated between 0 and 31 bits and then and'd
4038 // with a mask, emit rlwinm
4039 if (isInt32Immediate(N->getOperand(1), Imm) &&
4040 isRotateAndMask(N->getOperand(0).getNode(), Imm, false, SH, MB, ME)) {
4041 SDValue Val = N->getOperand(0).getOperand(0);
4042 SDValue Ops[] = { Val, getI32Imm(SH, dl), getI32Imm(MB, dl),
4043 getI32Imm(ME, dl) };
4044 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
4045 return;
4047 // If this is just a masked value where the input is not handled above, and
4048 // is not a rotate-left (handled by a pattern in the .td file), emit rlwinm
4049 if (isInt32Immediate(N->getOperand(1), Imm) &&
4050 isRunOfOnes(Imm, MB, ME) &&
4051 N->getOperand(0).getOpcode() != ISD::ROTL) {
4052 SDValue Val = N->getOperand(0);
4053 SDValue Ops[] = { Val, getI32Imm(0, dl), getI32Imm(MB, dl),
4054 getI32Imm(ME, dl) };
4055 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
4056 return;
4058 // If this is a 64-bit zero-extension mask, emit rldicl.
4059 if (isInt64Immediate(N->getOperand(1).getNode(), Imm64) &&
4060 isMask_64(Imm64)) {
4061 SDValue Val = N->getOperand(0);
4062 MB = 64 - countTrailingOnes(Imm64);
4063 SH = 0;
4065 if (Val.getOpcode() == ISD::ANY_EXTEND) {
4066 auto Op0 = Val.getOperand(0);
4067 if ( Op0.getOpcode() == ISD::SRL &&
4068 isInt32Immediate(Op0.getOperand(1).getNode(), Imm) && Imm <= MB) {
4070 auto ResultType = Val.getNode()->getValueType(0);
4071 auto ImDef = CurDAG->getMachineNode(PPC::IMPLICIT_DEF, dl,
4072 ResultType);
4073 SDValue IDVal (ImDef, 0);
4075 Val = SDValue(CurDAG->getMachineNode(PPC::INSERT_SUBREG, dl,
4076 ResultType, IDVal, Op0.getOperand(0),
4077 getI32Imm(1, dl)), 0);
4078 SH = 64 - Imm;
4082 // If the operand is a logical right shift, we can fold it into this
4083 // instruction: rldicl(rldicl(x, 64-n, n), 0, mb) -> rldicl(x, 64-n, mb)
4084 // for n <= mb. The right shift is really a left rotate followed by a
4085 // mask, and this mask is a more-restrictive sub-mask of the mask implied
4086 // by the shift.
4087 if (Val.getOpcode() == ISD::SRL &&
4088 isInt32Immediate(Val.getOperand(1).getNode(), Imm) && Imm <= MB) {
4089 assert(Imm < 64 && "Illegal shift amount");
4090 Val = Val.getOperand(0);
4091 SH = 64 - Imm;
4094 SDValue Ops[] = { Val, getI32Imm(SH, dl), getI32Imm(MB, dl) };
4095 CurDAG->SelectNodeTo(N, PPC::RLDICL, MVT::i64, Ops);
4096 return;
4098 // If this is a negated 64-bit zero-extension mask,
4099 // i.e. the immediate is a sequence of ones from the most significant side
4100 // and all zeros for the remainder, we should use rldicr.
4101 if (isInt64Immediate(N->getOperand(1).getNode(), Imm64) &&
4102 isMask_64(~Imm64)) {
4103 SDValue Val = N->getOperand(0);
4104 MB = 63 - countTrailingOnes(~Imm64);
4105 SH = 0;
4106 SDValue Ops[] = { Val, getI32Imm(SH, dl), getI32Imm(MB, dl) };
4107 CurDAG->SelectNodeTo(N, PPC::RLDICR, MVT::i64, Ops);
4108 return;
4111 // AND X, 0 -> 0, not "rlwinm 32".
4112 if (isInt32Immediate(N->getOperand(1), Imm) && (Imm == 0)) {
4113 ReplaceUses(SDValue(N, 0), N->getOperand(1));
4114 return;
4116 // ISD::OR doesn't get all the bitfield insertion fun.
4117 // (and (or x, c1), c2) where isRunOfOnes(~(c1^c2)) might be a
4118 // bitfield insert.
4119 if (isInt32Immediate(N->getOperand(1), Imm) &&
4120 N->getOperand(0).getOpcode() == ISD::OR &&
4121 isInt32Immediate(N->getOperand(0).getOperand(1), Imm2)) {
4122 // The idea here is to check whether this is equivalent to:
4123 // (c1 & m) | (x & ~m)
4124 // where m is a run-of-ones mask. The logic here is that, for each bit in
4125 // c1 and c2:
4126 // - if both are 1, then the output will be 1.
4127 // - if both are 0, then the output will be 0.
4128 // - if the bit in c1 is 0, and the bit in c2 is 1, then the output will
4129 // come from x.
4130 // - if the bit in c1 is 1, and the bit in c2 is 0, then the output will
4131 // be 0.
4132 // If that last condition is never the case, then we can form m from the
4133 // bits that are the same between c1 and c2.
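// As an illustration (constants chosen here for exposition): with
// c1 = 0x00FF0000 and c2 = 0x00FFFFFF, no bit has c1 = 1 and c2 = 0, and
// ~(c1 ^ c2) = 0xFFFF0000 is a run of ones, so the result is
// (c1 & 0xFFFF0000) | (x & 0x0000FFFF), i.e. a single rlwimi.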
4134 unsigned MB, ME;
4135 if (isRunOfOnes(~(Imm^Imm2), MB, ME) && !(~Imm & Imm2)) {
4136 SDValue Ops[] = { N->getOperand(0).getOperand(0),
4137 N->getOperand(0).getOperand(1),
4138 getI32Imm(0, dl), getI32Imm(MB, dl),
4139 getI32Imm(ME, dl) };
4140 ReplaceNode(N, CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops));
4141 return;
4145 // Other cases are autogenerated.
4146 break;
4148 case ISD::OR: {
4149 if (N->getValueType(0) == MVT::i32)
4150 if (tryBitfieldInsert(N))
4151 return;
4153 int16_t Imm;
4154 if (N->getOperand(0)->getOpcode() == ISD::FrameIndex &&
4155 isIntS16Immediate(N->getOperand(1), Imm)) {
4156 KnownBits LHSKnown;
4157 CurDAG->computeKnownBits(N->getOperand(0), LHSKnown);
4159 // If this is equivalent to an add, then we can fold it with the
4160 // FrameIndex calculation.
4161 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)Imm) == ~0ULL) {
4162 selectFrameIndex(N, N->getOperand(0).getNode(), (int)Imm);
4163 return;
4167 // OR with a 32-bit immediate can be handled by ori + oris
4168 // without creating an immediate in a GPR.
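// For example (illustrative), OR'ing with 0x12345678 can be emitted as
//   ori  rD, rS, 0x5678
//   oris rD, rD, 0x1234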
4169 uint64_t Imm64 = 0;
4170 bool IsPPC64 = PPCSubTarget->isPPC64();
4171 if (IsPPC64 && isInt64Immediate(N->getOperand(1), Imm64) &&
4172 (Imm64 & ~0xFFFFFFFFuLL) == 0) {
4173 // If ImmHi (ImmLo) is zero, only one ori (oris) is generated later.
4174 uint64_t ImmHi = Imm64 >> 16;
4175 uint64_t ImmLo = Imm64 & 0xFFFF;
4176 if (ImmHi != 0 && ImmLo != 0) {
4177 SDNode *Lo = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64,
4178 N->getOperand(0),
4179 getI16Imm(ImmLo, dl));
4180 SDValue Ops1[] = { SDValue(Lo, 0), getI16Imm(ImmHi, dl)};
4181 CurDAG->SelectNodeTo(N, PPC::ORIS8, MVT::i64, Ops1);
4182 return;
4186 // Other cases are autogenerated.
4187 break;
4189 case ISD::XOR: {
4190 // XOR with a 32-bit immediate can be handled by xori + xoris
4191 // without creating an immediate in a GPR.
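// For example (illustrative), XOR'ing with 0x12345678 becomes
// xori rD, rS, 0x5678 followed by xoris rD, rD, 0x1234.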
4192 uint64_t Imm64 = 0;
4193 bool IsPPC64 = PPCSubTarget->isPPC64();
4194 if (IsPPC64 && isInt64Immediate(N->getOperand(1), Imm64) &&
4195 (Imm64 & ~0xFFFFFFFFuLL) == 0) {
4196 // If ImmHi (ImmLo) is zero, only one xori (xoris) is generated later.
4197 uint64_t ImmHi = Imm64 >> 16;
4198 uint64_t ImmLo = Imm64 & 0xFFFF;
4199 if (ImmHi != 0 && ImmLo != 0) {
4200 SDNode *Lo = CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64,
4201 N->getOperand(0),
4202 getI16Imm(ImmLo, dl));
4203 SDValue Ops1[] = { SDValue(Lo, 0), getI16Imm(ImmHi, dl)};
4204 CurDAG->SelectNodeTo(N, PPC::XORIS8, MVT::i64, Ops1);
4205 return;
4209 break;
4211 case ISD::ADD: {
4212 int16_t Imm;
4213 if (N->getOperand(0)->getOpcode() == ISD::FrameIndex &&
4214 isIntS16Immediate(N->getOperand(1), Imm)) {
4215 selectFrameIndex(N, N->getOperand(0).getNode(), (int)Imm);
4216 return;
4219 break;
4221 case ISD::SHL: {
4222 unsigned Imm, SH, MB, ME;
4223 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, Imm) &&
4224 isRotateAndMask(N, Imm, true, SH, MB, ME)) {
4225 SDValue Ops[] = { N->getOperand(0).getOperand(0),
4226 getI32Imm(SH, dl), getI32Imm(MB, dl),
4227 getI32Imm(ME, dl) };
4228 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
4229 return;
4232 // Other cases are autogenerated.
4233 break;
4235 case ISD::SRL: {
4236 unsigned Imm, SH, MB, ME;
4237 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, Imm) &&
4238 isRotateAndMask(N, Imm, true, SH, MB, ME)) {
4239 SDValue Ops[] = { N->getOperand(0).getOperand(0),
4240 getI32Imm(SH, dl), getI32Imm(MB, dl),
4241 getI32Imm(ME, dl) };
4242 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
4243 return;
4246 // Other cases are autogenerated.
4247 break;
4249 // FIXME: Remove this once the ANDI glue bug is fixed:
4250 case PPCISD::ANDIo_1_EQ_BIT:
4251 case PPCISD::ANDIo_1_GT_BIT: {
4252 if (!ANDIGlueBug)
4253 break;
4255 EVT InVT = N->getOperand(0).getValueType();
4256 assert((InVT == MVT::i64 || InVT == MVT::i32) &&
4257 "Invalid input type for ANDIo_1_EQ_BIT");
4259 unsigned Opcode = (InVT == MVT::i64) ? PPC::ANDIo8 : PPC::ANDIo;
4260 SDValue AndI(CurDAG->getMachineNode(Opcode, dl, InVT, MVT::Glue,
4261 N->getOperand(0),
4262 CurDAG->getTargetConstant(1, dl, InVT)),
4264 SDValue CR0Reg = CurDAG->getRegister(PPC::CR0, MVT::i32);
4265 SDValue SRIdxVal =
4266 CurDAG->getTargetConstant(N->getOpcode() == PPCISD::ANDIo_1_EQ_BIT ?
4267 PPC::sub_eq : PPC::sub_gt, dl, MVT::i32);
4269 CurDAG->SelectNodeTo(N, TargetOpcode::EXTRACT_SUBREG, MVT::i1, CR0Reg,
4270 SRIdxVal, SDValue(AndI.getNode(), 1) /* glue */);
4271 return;
4273 case ISD::SELECT_CC: {
4274 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
4275 EVT PtrVT =
4276 CurDAG->getTargetLoweringInfo().getPointerTy(CurDAG->getDataLayout());
4277 bool isPPC64 = (PtrVT == MVT::i64);
4279 // If this is a select of i1 operands, we'll pattern match it.
4280 if (PPCSubTarget->useCRBits() &&
4281 N->getOperand(0).getValueType() == MVT::i1)
4282 break;
4284 // Handle the setcc cases here. select_cc lhs, 0, 1, 0, cc
4285 if (!isPPC64)
4286 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
4287 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
4288 if (ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N->getOperand(3)))
4289 if (N1C->isNullValue() && N3C->isNullValue() &&
4290 N2C->getZExtValue() == 1ULL && CC == ISD::SETNE &&
4291 // FIXME: Implement this optzn for PPC64.
4292 N->getValueType(0) == MVT::i32) {
4293 SDNode *Tmp =
4294 CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
4295 N->getOperand(0), getI32Imm(~0U, dl));
4296 CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(Tmp, 0),
4297 N->getOperand(0), SDValue(Tmp, 1));
4298 return;
4301 SDValue CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC, dl);
4303 if (N->getValueType(0) == MVT::i1) {
4304 // An i1 select is: (c & t) | (!c & f).
4305 bool Inv;
4306 unsigned Idx = getCRIdxForSetCC(CC, Inv);
4308 unsigned SRI;
4309 switch (Idx) {
4310 default: llvm_unreachable("Invalid CC index");
4311 case 0: SRI = PPC::sub_lt; break;
4312 case 1: SRI = PPC::sub_gt; break;
4313 case 2: SRI = PPC::sub_eq; break;
4314 case 3: SRI = PPC::sub_un; break;
4317 SDValue CCBit = CurDAG->getTargetExtractSubreg(SRI, dl, MVT::i1, CCReg);
4319 SDValue NotCCBit(CurDAG->getMachineNode(PPC::CRNOR, dl, MVT::i1,
4320 CCBit, CCBit), 0);
4321 SDValue C = Inv ? NotCCBit : CCBit,
4322 NotC = Inv ? CCBit : NotCCBit;
4324 SDValue CAndT(CurDAG->getMachineNode(PPC::CRAND, dl, MVT::i1,
4325 C, N->getOperand(2)), 0);
4326 SDValue NotCAndF(CurDAG->getMachineNode(PPC::CRAND, dl, MVT::i1,
4327 NotC, N->getOperand(3)), 0);
4329 CurDAG->SelectNodeTo(N, PPC::CROR, MVT::i1, CAndT, NotCAndF);
4330 return;
4333 unsigned BROpc = getPredicateForSetCC(CC);
4335 unsigned SelectCCOp;
4336 if (N->getValueType(0) == MVT::i32)
4337 SelectCCOp = PPC::SELECT_CC_I4;
4338 else if (N->getValueType(0) == MVT::i64)
4339 SelectCCOp = PPC::SELECT_CC_I8;
4340 else if (N->getValueType(0) == MVT::f32)
4341 if (PPCSubTarget->hasP8Vector())
4342 SelectCCOp = PPC::SELECT_CC_VSSRC;
4343 else
4344 SelectCCOp = PPC::SELECT_CC_F4;
4345 else if (N->getValueType(0) == MVT::f64)
4346 if (PPCSubTarget->hasVSX())
4347 SelectCCOp = PPC::SELECT_CC_VSFRC;
4348 else
4349 SelectCCOp = PPC::SELECT_CC_F8;
4350 else if (PPCSubTarget->hasQPX() && N->getValueType(0) == MVT::v4f64)
4351 SelectCCOp = PPC::SELECT_CC_QFRC;
4352 else if (PPCSubTarget->hasQPX() && N->getValueType(0) == MVT::v4f32)
4353 SelectCCOp = PPC::SELECT_CC_QSRC;
4354 else if (PPCSubTarget->hasQPX() && N->getValueType(0) == MVT::v4i1)
4355 SelectCCOp = PPC::SELECT_CC_QBRC;
4356 else if (N->getValueType(0) == MVT::v2f64 ||
4357 N->getValueType(0) == MVT::v2i64)
4358 SelectCCOp = PPC::SELECT_CC_VSRC;
4359 else
4360 SelectCCOp = PPC::SELECT_CC_VRRC;
4362 SDValue Ops[] = { CCReg, N->getOperand(2), N->getOperand(3),
4363 getI32Imm(BROpc, dl) };
4364 CurDAG->SelectNodeTo(N, SelectCCOp, N->getValueType(0), Ops);
4365 return;
4367 case ISD::VSELECT:
4368 if (PPCSubTarget->hasVSX()) {
4369 SDValue Ops[] = { N->getOperand(2), N->getOperand(1), N->getOperand(0) };
4370 CurDAG->SelectNodeTo(N, PPC::XXSEL, N->getValueType(0), Ops);
4371 return;
4373 break;
4375 case ISD::VECTOR_SHUFFLE:
4376 if (PPCSubTarget->hasVSX() && (N->getValueType(0) == MVT::v2f64 ||
4377 N->getValueType(0) == MVT::v2i64)) {
4378 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
4380 SDValue Op1 = N->getOperand(SVN->getMaskElt(0) < 2 ? 0 : 1),
4381 Op2 = N->getOperand(SVN->getMaskElt(1) < 2 ? 0 : 1);
4382 unsigned DM[2];
4384 for (int i = 0; i < 2; ++i)
4385 if (SVN->getMaskElt(i) <= 0 || SVN->getMaskElt(i) == 2)
4386 DM[i] = 0;
4387 else
4388 DM[i] = 1;
4390 if (Op1 == Op2 && DM[0] == 0 && DM[1] == 0 &&
4391 Op1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
4392 isa<LoadSDNode>(Op1.getOperand(0))) {
4393 LoadSDNode *LD = cast<LoadSDNode>(Op1.getOperand(0));
4394 SDValue Base, Offset;
4396 if (LD->isUnindexed() && LD->hasOneUse() && Op1.hasOneUse() &&
4397 (LD->getMemoryVT() == MVT::f64 ||
4398 LD->getMemoryVT() == MVT::i64) &&
4399 SelectAddrIdxOnly(LD->getBasePtr(), Base, Offset)) {
4400 SDValue Chain = LD->getChain();
4401 SDValue Ops[] = { Base, Offset, Chain };
4402 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
4403 MemOp[0] = LD->getMemOperand();
4404 SDNode *NewN = CurDAG->SelectNodeTo(N, PPC::LXVDSX,
4405 N->getValueType(0), Ops);
4406 cast<MachineSDNode>(NewN)->setMemRefs(MemOp, MemOp + 1);
4407 return;
4411 // For little endian, we must swap the input operands and adjust
4412 // the mask elements (reverse and invert them).
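// For example, operands (Op1, Op2) with DM = {0, 1} become (Op2, Op1) with
// DM = {1 - 1, 1 - 0} = {0, 1}, while DM = {0, 0} becomes {1, 1}.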
4413 if (PPCSubTarget->isLittleEndian()) {
4414 std::swap(Op1, Op2);
4415 unsigned tmp = DM[0];
4416 DM[0] = 1 - DM[1];
4417 DM[1] = 1 - tmp;
4420 SDValue DMV = CurDAG->getTargetConstant(DM[1] | (DM[0] << 1), dl,
4421 MVT::i32);
4422 SDValue Ops[] = { Op1, Op2, DMV };
4423 CurDAG->SelectNodeTo(N, PPC::XXPERMDI, N->getValueType(0), Ops);
4424 return;
4427 break;
4428 case PPCISD::BDNZ:
4429 case PPCISD::BDZ: {
4430 bool IsPPC64 = PPCSubTarget->isPPC64();
4431 SDValue Ops[] = { N->getOperand(1), N->getOperand(0) };
4432 CurDAG->SelectNodeTo(N, N->getOpcode() == PPCISD::BDNZ
4433 ? (IsPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
4434 : (IsPPC64 ? PPC::BDZ8 : PPC::BDZ),
4435 MVT::Other, Ops);
4436 return;
4438 case PPCISD::COND_BRANCH: {
4439 // Op #0 is the Chain.
4440 // Op #1 is the PPC::PRED_* number.
4441 // Op #2 is the CR#
4442 // Op #3 is the Dest MBB
4443 // Op #4 is the Flag.
4444 // Prevent PPC::PRED_* from being selected into LI.
4445 unsigned PCC = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
4446 if (EnableBranchHint)
4447 PCC |= getBranchHint(PCC, FuncInfo, N->getOperand(3));
4449 SDValue Pred = getI32Imm(PCC, dl);
4450 SDValue Ops[] = { Pred, N->getOperand(2), N->getOperand(3),
4451 N->getOperand(0), N->getOperand(4) };
4452 CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops);
4453 return;
4455 case ISD::BR_CC: {
4456 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
4457 unsigned PCC = getPredicateForSetCC(CC);
4459 if (N->getOperand(2).getValueType() == MVT::i1) {
4460 unsigned Opc;
4461 bool Swap;
4462 switch (PCC) {
4463 default: llvm_unreachable("Unexpected Boolean-operand predicate");
4464 case PPC::PRED_LT: Opc = PPC::CRANDC; Swap = true; break;
4465 case PPC::PRED_LE: Opc = PPC::CRORC; Swap = true; break;
4466 case PPC::PRED_EQ: Opc = PPC::CREQV; Swap = false; break;
4467 case PPC::PRED_GE: Opc = PPC::CRORC; Swap = false; break;
4468 case PPC::PRED_GT: Opc = PPC::CRANDC; Swap = false; break;
4469 case PPC::PRED_NE: Opc = PPC::CRXOR; Swap = false; break;
4472 SDValue BitComp(CurDAG->getMachineNode(Opc, dl, MVT::i1,
4473 N->getOperand(Swap ? 3 : 2),
4474 N->getOperand(Swap ? 2 : 3)), 0);
4475 CurDAG->SelectNodeTo(N, PPC::BC, MVT::Other, BitComp, N->getOperand(4),
4476 N->getOperand(0));
4477 return;
4480 if (EnableBranchHint)
4481 PCC |= getBranchHint(PCC, FuncInfo, N->getOperand(4));
4483 SDValue CondCode = SelectCC(N->getOperand(2), N->getOperand(3), CC, dl);
4484 SDValue Ops[] = { getI32Imm(PCC, dl), CondCode,
4485 N->getOperand(4), N->getOperand(0) };
4486 CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops);
4487 return;
4489 case ISD::BRIND: {
4490 // FIXME: Should custom lower this.
4491 SDValue Chain = N->getOperand(0);
4492 SDValue Target = N->getOperand(1);
4493 unsigned Opc = Target.getValueType() == MVT::i32 ? PPC::MTCTR : PPC::MTCTR8;
4494 unsigned Reg = Target.getValueType() == MVT::i32 ? PPC::BCTR : PPC::BCTR8;
4495 Chain = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Target,
4496 Chain), 0);
4497 CurDAG->SelectNodeTo(N, Reg, MVT::Other, Chain);
4498 return;
4500 case PPCISD::TOC_ENTRY: {
4501 assert ((PPCSubTarget->isPPC64() || PPCSubTarget->isSVR4ABI()) &&
4502 "Only supported for 64-bit ABI and 32-bit SVR4");
4503 if (PPCSubTarget->isSVR4ABI() && !PPCSubTarget->isPPC64()) {
4504 SDValue GA = N->getOperand(0);
4505 SDNode *MN = CurDAG->getMachineNode(PPC::LWZtoc, dl, MVT::i32, GA,
4506 N->getOperand(1));
4507 transferMemOperands(N, MN);
4508 ReplaceNode(N, MN);
4509 return;
4512 // For medium and large code model, we generate two instructions as
4513 // described below. Otherwise we allow SelectCodeCommon to handle this,
4514 // selecting one of LDtoc, LDtocJTI, LDtocCPT, and LDtocBA.
4515 CodeModel::Model CModel = TM.getCodeModel();
4516 if (CModel != CodeModel::Medium && CModel != CodeModel::Large)
4517 break;
4519 // The first source operand is a TargetGlobalAddress or a TargetJumpTable.
4520 // If it must be toc-referenced according to PPCSubTarget, we generate:
4521 // LDtocL(@sym, ADDIStocHA(%x2, @sym))
4522 // Otherwise we generate:
4523 // ADDItocL(ADDIStocHA(%x2, @sym), @sym)
4524 SDValue GA = N->getOperand(0);
4525 SDValue TOCbase = N->getOperand(1);
4526 SDNode *Tmp = CurDAG->getMachineNode(PPC::ADDIStocHA, dl, MVT::i64,
4527 TOCbase, GA);
4529 if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA) ||
4530 CModel == CodeModel::Large) {
4531 SDNode *MN = CurDAG->getMachineNode(PPC::LDtocL, dl, MVT::i64, GA,
4532 SDValue(Tmp, 0));
4533 transferMemOperands(N, MN);
4534 ReplaceNode(N, MN);
4535 return;
4538 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA)) {
4539 const GlobalValue *GV = G->getGlobal();
4540 unsigned char GVFlags = PPCSubTarget->classifyGlobalReference(GV);
4541 if (GVFlags & PPCII::MO_NLP_FLAG) {
4542 SDNode *MN = CurDAG->getMachineNode(PPC::LDtocL, dl, MVT::i64, GA,
4543 SDValue(Tmp, 0));
4544 transferMemOperands(N, MN);
4545 ReplaceNode(N, MN);
4546 return;
4550 ReplaceNode(N, CurDAG->getMachineNode(PPC::ADDItocL, dl, MVT::i64,
4551 SDValue(Tmp, 0), GA));
4552 return;
4554 case PPCISD::PPC32_PICGOT:
4555 // Generate a PIC-safe GOT reference.
4556 assert(!PPCSubTarget->isPPC64() && PPCSubTarget->isSVR4ABI() &&
4557 "PPCISD::PPC32_PICGOT is only supported for 32-bit SVR4");
4558 CurDAG->SelectNodeTo(N, PPC::PPC32PICGOT,
4559 PPCLowering->getPointerTy(CurDAG->getDataLayout()),
4560 MVT::i32);
4561 return;
4563 case PPCISD::VADD_SPLAT: {
4564 // This expands into one of three sequences, depending on whether
4565 // the first operand is odd or even, positive or negative.
4566 assert(isa<ConstantSDNode>(N->getOperand(0)) &&
4567 isa<ConstantSDNode>(N->getOperand(1)) &&
4568 "Invalid operand on VADD_SPLAT!");
4570 int Elt = N->getConstantOperandVal(0);
4571 int EltSize = N->getConstantOperandVal(1);
4572 unsigned Opc1, Opc2, Opc3;
4573 EVT VT;
4575 if (EltSize == 1) {
4576 Opc1 = PPC::VSPLTISB;
4577 Opc2 = PPC::VADDUBM;
4578 Opc3 = PPC::VSUBUBM;
4579 VT = MVT::v16i8;
4580 } else if (EltSize == 2) {
4581 Opc1 = PPC::VSPLTISH;
4582 Opc2 = PPC::VADDUHM;
4583 Opc3 = PPC::VSUBUHM;
4584 VT = MVT::v8i16;
4585 } else {
4586 assert(EltSize == 4 && "Invalid element size on VADD_SPLAT!");
4587 Opc1 = PPC::VSPLTISW;
4588 Opc2 = PPC::VADDUWM;
4589 Opc3 = PPC::VSUBUWM;
4590 VT = MVT::v4i32;
4593 if ((Elt & 1) == 0) {
4594 // Elt is even, in the range [-32,-18] + [16,30].
4596 // Convert: VADD_SPLAT elt, size
4597 // Into: tmp = VSPLTIS[BHW] elt/2
4598 // VADDU[BHW]M tmp, tmp
4599 // Where: [BHW] = B for size = 1, H for size = 2, W for size = 4
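// For example (illustrative), VADD_SPLAT 20, 4 becomes
//   tmp = VSPLTISW 10
//   VADDUWM tmp, tmp
// splatting 10 into each word and doubling it to 20.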
4600 SDValue EltVal = getI32Imm(Elt >> 1, dl);
4601 SDNode *Tmp = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
4602 SDValue TmpVal = SDValue(Tmp, 0);
4603 ReplaceNode(N, CurDAG->getMachineNode(Opc2, dl, VT, TmpVal, TmpVal));
4604 return;
4605 } else if (Elt > 0) {
4606 // Elt is odd and positive, in the range [17,31].
4608 // Convert: VADD_SPLAT elt, size
4609 // Into: tmp1 = VSPLTIS[BHW] elt-16
4610 // tmp2 = VSPLTIS[BHW] -16
4611 // VSUBU[BHW]M tmp1, tmp2
4612 SDValue EltVal = getI32Imm(Elt - 16, dl);
4613 SDNode *Tmp1 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
4614 EltVal = getI32Imm(-16, dl);
4615 SDNode *Tmp2 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
4616 ReplaceNode(N, CurDAG->getMachineNode(Opc3, dl, VT, SDValue(Tmp1, 0),
4617 SDValue(Tmp2, 0)));
4618 return;
4619 } else {
4620 // Elt is odd and negative, in the range [-31,-17].
4622 // Convert: VADD_SPLAT elt, size
4623 // Into: tmp1 = VSPLTIS[BHW] elt+16
4624 // tmp2 = VSPLTIS[BHW] -16
4625 // VADDU[BHW]M tmp1, tmp2
4626 SDValue EltVal = getI32Imm(Elt + 16, dl);
4627 SDNode *Tmp1 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
4628 EltVal = getI32Imm(-16, dl);
4629 SDNode *Tmp2 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
4630 ReplaceNode(N, CurDAG->getMachineNode(Opc2, dl, VT, SDValue(Tmp1, 0),
4631 SDValue(Tmp2, 0)));
4632 return;
4633 }
4634 }
4635 }
4637 SelectCode(N);
4638 }
4640 // If the target supports the cmpb instruction, do the idiom recognition here.
4641 // We don't do this as a DAG combine because we don't want to do it as nodes
4642 // are being combined (because we might miss part of the eventual idiom). We
4643 // don't want to do it during instruction selection because we want to reuse
4644 // the logic for lowering the masking operations already part of the
4645 // instruction selector.
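// (The cmpb instruction compares its two source registers byte by byte,
// setting each result byte to 0xff where the corresponding bytes are equal
// and to 0x00 where they differ; the code below recognizes OR trees of
// per-byte select_cc nodes computing exactly that.)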
4646 SDValue PPCDAGToDAGISel::combineToCMPB(SDNode *N) {
4647 SDLoc dl(N);
4649 assert(N->getOpcode() == ISD::OR &&
4650 "Only OR nodes are supported for CMPB");
4652 SDValue Res;
4653 if (!PPCSubTarget->hasCMPB())
4654 return Res;
4656 if (N->getValueType(0) != MVT::i32 &&
4657 N->getValueType(0) != MVT::i64)
4658 return Res;
4660 EVT VT = N->getValueType(0);
4662 SDValue RHS, LHS;
4663 bool BytesFound[8] = {false, false, false, false, false, false, false, false};
4664 uint64_t Mask = 0, Alt = 0;
4666 auto IsByteSelectCC = [this](SDValue O, unsigned &b,
4667 uint64_t &Mask, uint64_t &Alt,
4668 SDValue &LHS, SDValue &RHS) {
4669 if (O.getOpcode() != ISD::SELECT_CC)
4670 return false;
4671 ISD::CondCode CC = cast<CondCodeSDNode>(O.getOperand(4))->get();
4673 if (!isa<ConstantSDNode>(O.getOperand(2)) ||
4674 !isa<ConstantSDNode>(O.getOperand(3)))
4675 return false;
4677 uint64_t PM = O.getConstantOperandVal(2);
4678 uint64_t PAlt = O.getConstantOperandVal(3);
4679 for (b = 0; b < 8; ++b) {
4680 uint64_t Mask = UINT64_C(0xFF) << (8*b);
4681 if (PM && (PM & Mask) == PM && (PAlt & Mask) == PAlt)
4682 break;
4685 if (b == 8)
4686 return false;
4687 Mask |= PM;
4688 Alt |= PAlt;
4690 if (!isa<ConstantSDNode>(O.getOperand(1)) ||
4691 O.getConstantOperandVal(1) != 0) {
4692 SDValue Op0 = O.getOperand(0), Op1 = O.getOperand(1);
4693 if (Op0.getOpcode() == ISD::TRUNCATE)
4694 Op0 = Op0.getOperand(0);
4695 if (Op1.getOpcode() == ISD::TRUNCATE)
4696 Op1 = Op1.getOperand(0);
4698 if (Op0.getOpcode() == ISD::SRL && Op1.getOpcode() == ISD::SRL &&
4699 Op0.getOperand(1) == Op1.getOperand(1) && CC == ISD::SETEQ &&
4700 isa<ConstantSDNode>(Op0.getOperand(1))) {
4702 unsigned Bits = Op0.getValueSizeInBits();
4703 if (b != Bits/8-1)
4704 return false;
4705 if (Op0.getConstantOperandVal(1) != Bits-8)
4706 return false;
4708 LHS = Op0.getOperand(0);
4709 RHS = Op1.getOperand(0);
4710 return true;
4713 // When we have small integers (i16 to be specific), the form present
4714 // post-legalization uses SETULT in the SELECT_CC for the
4715 // higher-order byte, relying on the fact that the
4716 // even-higher-order bytes are known to all be zero, for example:
4717 // select_cc (xor $lhs, $rhs), 256, 65280, 0, setult
4718 // (so when the second byte is the same, because all higher-order
4719 // bits from bytes 3 and 4 are known to be zero, the result of the
4720 // xor can be at most 255)
4721 if (Op0.getOpcode() == ISD::XOR && CC == ISD::SETULT &&
4722 isa<ConstantSDNode>(O.getOperand(1))) {
4724 uint64_t ULim = O.getConstantOperandVal(1);
4725 if (ULim != (UINT64_C(1) << b*8))
4726 return false;
4728 // Now we need to make sure that the upper bytes are known to be
4729 // zero.
4730 unsigned Bits = Op0.getValueSizeInBits();
4731 if (!CurDAG->MaskedValueIsZero(
4732 Op0, APInt::getHighBitsSet(Bits, Bits - (b + 1) * 8)))
4733 return false;
4735 LHS = Op0.getOperand(0);
4736 RHS = Op0.getOperand(1);
4737 return true;
4740 return false;
4743 if (CC != ISD::SETEQ)
4744 return false;
4746 SDValue Op = O.getOperand(0);
4747 if (Op.getOpcode() == ISD::AND) {
4748 if (!isa<ConstantSDNode>(Op.getOperand(1)))
4749 return false;
4750 if (Op.getConstantOperandVal(1) != (UINT64_C(0xFF) << (8*b)))
4751 return false;
4753 SDValue XOR = Op.getOperand(0);
4754 if (XOR.getOpcode() == ISD::TRUNCATE)
4755 XOR = XOR.getOperand(0);
4756 if (XOR.getOpcode() != ISD::XOR)
4757 return false;
4759 LHS = XOR.getOperand(0);
4760 RHS = XOR.getOperand(1);
4761 return true;
4762 } else if (Op.getOpcode() == ISD::SRL) {
4763 if (!isa<ConstantSDNode>(Op.getOperand(1)))
4764 return false;
4765 unsigned Bits = Op.getValueSizeInBits();
4766 if (b != Bits/8-1)
4767 return false;
4768 if (Op.getConstantOperandVal(1) != Bits-8)
4769 return false;
4771 SDValue XOR = Op.getOperand(0);
4772 if (XOR.getOpcode() == ISD::TRUNCATE)
4773 XOR = XOR.getOperand(0);
4774 if (XOR.getOpcode() != ISD::XOR)
4775 return false;
4777 LHS = XOR.getOperand(0);
4778 RHS = XOR.getOperand(1);
4779 return true;
4782 return false;
4785 SmallVector<SDValue, 8> Queue(1, SDValue(N, 0));
4786 while (!Queue.empty()) {
4787 SDValue V = Queue.pop_back_val();
4789 for (const SDValue &O : V.getNode()->ops()) {
4790 unsigned b;
4791 uint64_t M = 0, A = 0;
4792 SDValue OLHS, ORHS;
4793 if (O.getOpcode() == ISD::OR) {
4794 Queue.push_back(O);
4795 } else if (IsByteSelectCC(O, b, M, A, OLHS, ORHS)) {
4796 if (!LHS) {
4797 LHS = OLHS;
4798 RHS = ORHS;
4799 BytesFound[b] = true;
4800 Mask |= M;
4801 Alt |= A;
4802 } else if ((LHS == ORHS && RHS == OLHS) ||
4803 (RHS == ORHS && LHS == OLHS)) {
4804 BytesFound[b] = true;
4805 Mask |= M;
4806 Alt |= A;
4807 } else {
4808 return Res;
4810 } else {
4811 return Res;
4816 unsigned LastB = 0, BCnt = 0;
4817 for (unsigned i = 0; i < 8; ++i)
4818 if (BytesFound[LastB]) {
4819 ++BCnt;
4820 LastB = i;
4823 if (!LastB || BCnt < 2)
4824 return Res;
4826 // Because we'll be zero-extending the output anyway if we don't have a specific
4827 // value for each input byte (via the Mask), we can 'anyext' the inputs.
4828 if (LHS.getValueType() != VT) {
4829 LHS = CurDAG->getAnyExtOrTrunc(LHS, dl, VT);
4830 RHS = CurDAG->getAnyExtOrTrunc(RHS, dl, VT);
4833 Res = CurDAG->getNode(PPCISD::CMPB, dl, VT, LHS, RHS);
4835 bool NonTrivialMask = ((int64_t) Mask) != INT64_C(-1);
4836 if (NonTrivialMask && !Alt) {
4837 // Res = Mask & CMPB
4838 Res = CurDAG->getNode(ISD::AND, dl, VT, Res,
4839 CurDAG->getConstant(Mask, dl, VT));
4840 } else if (Alt) {
4841 // Res = (CMPB & Mask) | (~CMPB & Alt)
4842 // Which, as suggested here:
4843 // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
4844 // can be written as:
4845 // Res = Alt ^ ((Alt ^ Mask) & CMPB)
4846 // useful because the (Alt ^ Mask) can be pre-computed.
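// (Quick per-bit sanity check of the identity: where CMPB is 1 the result is
// Alt ^ (Alt ^ Mask) = Mask; where CMPB is 0 the result is Alt ^ 0 = Alt.)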
4847 Res = CurDAG->getNode(ISD::AND, dl, VT, Res,
4848 CurDAG->getConstant(Mask ^ Alt, dl, VT));
4849 Res = CurDAG->getNode(ISD::XOR, dl, VT, Res,
4850 CurDAG->getConstant(Alt, dl, VT));
4853 return Res;
4856 // When CR bit registers are enabled, an extension of an i1 variable to an i32
4857 // or i64 value is lowered in terms of a SELECT_I[48] operation, and thus
4858 // involves constant materialization of a 0 or a 1 or both. If the result of
4859 // the extension is then operated upon by some operator that can be constant
4860 // folded with a constant 0 or 1, and that constant can be materialized using
4861 // only one instruction (like a zero or one), then we should fold in those
4862 // operations with the select.
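// Small illustrative case (not from the source): for
//   %e = zext i1 %c to i32
//   %r = add i32 %e, 7
// substituting 1 and 0 for %e constant-folds to 8 and 7, both of which fit in
// a signed 16-bit immediate, so the add becomes (select %c, 8, 7), and we then
// try to fold that select into its own user as well.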
4863 void PPCDAGToDAGISel::foldBoolExts(SDValue &Res, SDNode *&N) {
4864 if (!PPCSubTarget->useCRBits())
4865 return;
4867 if (N->getOpcode() != ISD::ZERO_EXTEND &&
4868 N->getOpcode() != ISD::SIGN_EXTEND &&
4869 N->getOpcode() != ISD::ANY_EXTEND)
4870 return;
4872 if (N->getOperand(0).getValueType() != MVT::i1)
4873 return;
4875 if (!N->hasOneUse())
4876 return;
4878 SDLoc dl(N);
4879 EVT VT = N->getValueType(0);
4880 SDValue Cond = N->getOperand(0);
4881 SDValue ConstTrue =
4882 CurDAG->getConstant(N->getOpcode() == ISD::SIGN_EXTEND ? -1 : 1, dl, VT);
4883 SDValue ConstFalse = CurDAG->getConstant(0, dl, VT);
4885 do {
4886 SDNode *User = *N->use_begin();
4887 if (User->getNumOperands() != 2)
4888 break;
4890 auto TryFold = [this, N, User, dl](SDValue Val) {
4891 SDValue UserO0 = User->getOperand(0), UserO1 = User->getOperand(1);
4892 SDValue O0 = UserO0.getNode() == N ? Val : UserO0;
4893 SDValue O1 = UserO1.getNode() == N ? Val : UserO1;
4895 return CurDAG->FoldConstantArithmetic(User->getOpcode(), dl,
4896 User->getValueType(0),
4897 O0.getNode(), O1.getNode());
4900 // FIXME: When the semantics of the interaction between select and undef
4901 // are clearly defined, it may turn out to be unnecessary to break here.
4902 SDValue TrueRes = TryFold(ConstTrue);
4903 if (!TrueRes || TrueRes.isUndef())
4904 break;
4905 SDValue FalseRes = TryFold(ConstFalse);
4906 if (!FalseRes || FalseRes.isUndef())
4907 break;
4909 // For us to materialize these using one instruction, we must be able to
4910 // represent them as signed 16-bit integers.
4911 uint64_t True = cast<ConstantSDNode>(TrueRes)->getZExtValue(),
4912 False = cast<ConstantSDNode>(FalseRes)->getZExtValue();
4913 if (!isInt<16>(True) || !isInt<16>(False))
4914 break;
4916 // We can replace User with a new SELECT node, and try again to see if we
4917 // can fold the select with its user.
4918 Res = CurDAG->getSelect(dl, User->getValueType(0), Cond, TrueRes, FalseRes);
4919 N = User;
4920 ConstTrue = TrueRes;
4921 ConstFalse = FalseRes;
4922 } while (N->hasOneUse());
4925 void PPCDAGToDAGISel::PreprocessISelDAG() {
4926 SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
4927 ++Position;
4929 bool MadeChange = false;
4930 while (Position != CurDAG->allnodes_begin()) {
4931 SDNode *N = &*--Position;
4932 if (N->use_empty())
4933 continue;
4935 SDValue Res;
4936 switch (N->getOpcode()) {
4937 default: break;
4938 case ISD::OR:
4939 Res = combineToCMPB(N);
4940 break;
4943 if (!Res)
4944 foldBoolExts(Res, N);
4946 if (Res) {
4947 DEBUG(dbgs() << "PPC DAG preprocessing replacing:\nOld: ");
4948 DEBUG(N->dump(CurDAG));
4949 DEBUG(dbgs() << "\nNew: ");
4950 DEBUG(Res.getNode()->dump(CurDAG));
4951 DEBUG(dbgs() << "\n");
4953 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
4954 MadeChange = true;
4958 if (MadeChange)
4959 CurDAG->RemoveDeadNodes();
4962 /// PostprocessISelDAG - Perform some late peephole optimizations
4963 /// on the DAG representation.
4964 void PPCDAGToDAGISel::PostprocessISelDAG() {
4965 // Skip peepholes at -O0.
4966 if (TM.getOptLevel() == CodeGenOpt::None)
4967 return;
4969 PeepholePPC64();
4970 PeepholeCROps();
4971 PeepholePPC64ZExt();
4974 // Check if all users of this node will become isel where the second operand
4975 // is the constant zero. If this is so, and if we can negate the condition,
4976 // then we can flip the true and false operands. This will allow the zero to
4977 // be folded with the isel so that we don't need to materialize a register
4978 // containing zero.
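// Illustrative sketch: if a CRAND result is used only by nodes of the form
// (SELECT_I4 cond, %x, (LI 0)), the CR peephole below can emit CRNAND instead
// and swap each select's true/false operands, so the zero ends up in the
// operand position where isel can simply use r0 rather than a register that
// had to be materialized with a zero.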
4979 bool PPCDAGToDAGISel::AllUsersSelectZero(SDNode *N) {
4980 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
4981 UI != UE; ++UI) {
4982 SDNode *User = *UI;
4983 if (!User->isMachineOpcode())
4984 return false;
4985 if (User->getMachineOpcode() != PPC::SELECT_I4 &&
4986 User->getMachineOpcode() != PPC::SELECT_I8)
4987 return false;
4989 SDNode *Op2 = User->getOperand(2).getNode();
4990 if (!Op2->isMachineOpcode())
4991 return false;
4993 if (Op2->getMachineOpcode() != PPC::LI &&
4994 Op2->getMachineOpcode() != PPC::LI8)
4995 return false;
4997 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op2->getOperand(0));
4998 if (!C)
4999 return false;
5001 if (!C->isNullValue())
5002 return false;
5005 return true;
5008 void PPCDAGToDAGISel::SwapAllSelectUsers(SDNode *N) {
5009 SmallVector<SDNode *, 4> ToReplace;
5010 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
5011 UI != UE; ++UI) {
5012 SDNode *User = *UI;
5013 assert((User->getMachineOpcode() == PPC::SELECT_I4 ||
5014 User->getMachineOpcode() == PPC::SELECT_I8) &&
5015 "Must have all select users");
5016 ToReplace.push_back(User);
5019 for (SmallVector<SDNode *, 4>::iterator UI = ToReplace.begin(),
5020 UE = ToReplace.end(); UI != UE; ++UI) {
5021 SDNode *User = *UI;
5022 SDNode *ResNode =
5023 CurDAG->getMachineNode(User->getMachineOpcode(), SDLoc(User),
5024 User->getValueType(0), User->getOperand(0),
5025 User->getOperand(2),
5026 User->getOperand(1));
5028 DEBUG(dbgs() << "CR Peephole replacing:\nOld: ");
5029 DEBUG(User->dump(CurDAG));
5030 DEBUG(dbgs() << "\nNew: ");
5031 DEBUG(ResNode->dump(CurDAG));
5032 DEBUG(dbgs() << "\n");
5034 ReplaceUses(User, ResNode);
5038 void PPCDAGToDAGISel::PeepholeCROps() {
5039 bool IsModified;
5040 do {
5041 IsModified = false;
5042 for (SDNode &Node : CurDAG->allnodes()) {
5043 MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(&Node);
5044 if (!MachineNode || MachineNode->use_empty())
5045 continue;
5046 SDNode *ResNode = MachineNode;
5048 bool Op1Set = false, Op1Unset = false,
5049 Op1Not = false,
5050 Op2Set = false, Op2Unset = false,
5051 Op2Not = false;
5053 unsigned Opcode = MachineNode->getMachineOpcode();
5054 switch (Opcode) {
5055 default: break;
5056 case PPC::CRAND:
5057 case PPC::CRNAND:
5058 case PPC::CROR:
5059 case PPC::CRXOR:
5060 case PPC::CRNOR:
5061 case PPC::CREQV:
5062 case PPC::CRANDC:
5063 case PPC::CRORC: {
5064 SDValue Op = MachineNode->getOperand(1);
5065 if (Op.isMachineOpcode()) {
5066 if (Op.getMachineOpcode() == PPC::CRSET)
5067 Op2Set = true;
5068 else if (Op.getMachineOpcode() == PPC::CRUNSET)
5069 Op2Unset = true;
5070 else if (Op.getMachineOpcode() == PPC::CRNOR &&
5071 Op.getOperand(0) == Op.getOperand(1))
5072 Op2Not = true;
5074 LLVM_FALLTHROUGH;
5076 case PPC::BC:
5077 case PPC::BCn:
5078 case PPC::SELECT_I4:
5079 case PPC::SELECT_I8:
5080 case PPC::SELECT_F4:
5081 case PPC::SELECT_F8:
5082 case PPC::SELECT_QFRC:
5083 case PPC::SELECT_QSRC:
5084 case PPC::SELECT_QBRC:
5085 case PPC::SELECT_VRRC:
5086 case PPC::SELECT_VSFRC:
5087 case PPC::SELECT_VSSRC:
5088 case PPC::SELECT_VSRC: {
5089 SDValue Op = MachineNode->getOperand(0);
5090 if (Op.isMachineOpcode()) {
5091 if (Op.getMachineOpcode() == PPC::CRSET)
5092 Op1Set = true;
5093 else if (Op.getMachineOpcode() == PPC::CRUNSET)
5094 Op1Unset = true;
5095 else if (Op.getMachineOpcode() == PPC::CRNOR &&
5096 Op.getOperand(0) == Op.getOperand(1))
5097 Op1Not = true;
5100 break;
5103 bool SelectSwap = false;
5104 switch (Opcode) {
5105 default: break;
5106 case PPC::CRAND:
5107 if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
5108 // x & x = x
5109 ResNode = MachineNode->getOperand(0).getNode();
5110 else if (Op1Set)
5111 // 1 & y = y
5112 ResNode = MachineNode->getOperand(1).getNode();
5113 else if (Op2Set)
5114 // x & 1 = x
5115 ResNode = MachineNode->getOperand(0).getNode();
5116 else if (Op1Unset || Op2Unset)
5117 // x & 0 = 0 & y = 0
5118 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode),
5119 MVT::i1);
5120 else if (Op1Not)
5121 // ~x & y = andc(y, x)
5122 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode),
5123 MVT::i1, MachineNode->getOperand(1),
5124 MachineNode->getOperand(0).
5125 getOperand(0));
5126 else if (Op2Not)
5127 // x & ~y = andc(x, y)
5128 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode),
5129 MVT::i1, MachineNode->getOperand(0),
5130 MachineNode->getOperand(1).
5131 getOperand(0));
5132 else if (AllUsersSelectZero(MachineNode)) {
5133 ResNode = CurDAG->getMachineNode(PPC::CRNAND, SDLoc(MachineNode),
5134 MVT::i1, MachineNode->getOperand(0),
5135 MachineNode->getOperand(1));
5136 SelectSwap = true;
5138 break;
5139 case PPC::CRNAND:
5140 if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
5141 // nand(x, x) -> nor(x, x)
5142 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5143 MVT::i1, MachineNode->getOperand(0),
5144 MachineNode->getOperand(0));
5145 else if (Op1Set)
5146 // nand(1, y) -> nor(y, y)
5147 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5148 MVT::i1, MachineNode->getOperand(1),
5149 MachineNode->getOperand(1));
5150 else if (Op2Set)
5151 // nand(x, 1) -> nor(x, x)
5152 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5153 MVT::i1, MachineNode->getOperand(0),
5154 MachineNode->getOperand(0));
5155 else if (Op1Unset || Op2Unset)
5156 // nand(x, 0) = nand(0, y) = 1
5157 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode),
5158 MVT::i1);
5159 else if (Op1Not)
5160 // nand(~x, y) = ~(~x & y) = x | ~y = orc(x, y)
5161 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode),
5162 MVT::i1, MachineNode->getOperand(0).
5163 getOperand(0),
5164 MachineNode->getOperand(1));
5165 else if (Op2Not)
5166 // nand(x, ~y) = ~x | y = orc(y, x)
5167 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode),
5168 MVT::i1, MachineNode->getOperand(1).
5169 getOperand(0),
5170 MachineNode->getOperand(0));
5171 else if (AllUsersSelectZero(MachineNode)) {
5172 ResNode = CurDAG->getMachineNode(PPC::CRAND, SDLoc(MachineNode),
5173 MVT::i1, MachineNode->getOperand(0),
5174 MachineNode->getOperand(1));
5175 SelectSwap = true;
5177 break;
5178 case PPC::CROR:
5179 if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
5180 // x | x = x
5181 ResNode = MachineNode->getOperand(0).getNode();
5182 else if (Op1Set || Op2Set)
5183 // x | 1 = 1 | y = 1
5184 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode),
5185 MVT::i1);
5186 else if (Op1Unset)
5187 // 0 | y = y
5188 ResNode = MachineNode->getOperand(1).getNode();
5189 else if (Op2Unset)
5190 // x | 0 = x
5191 ResNode = MachineNode->getOperand(0).getNode();
5192 else if (Op1Not)
5193 // ~x | y = orc(y, x)
5194 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode),
5195 MVT::i1, MachineNode->getOperand(1),
5196 MachineNode->getOperand(0).
5197 getOperand(0));
5198 else if (Op2Not)
5199 // x | ~y = orc(x, y)
5200 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode),
5201 MVT::i1, MachineNode->getOperand(0),
5202 MachineNode->getOperand(1).
5203 getOperand(0));
5204 else if (AllUsersSelectZero(MachineNode)) {
5205 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5206 MVT::i1, MachineNode->getOperand(0),
5207 MachineNode->getOperand(1));
5208 SelectSwap = true;
5210 break;
5211 case PPC::CRXOR:
5212 if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
5213 // xor(x, x) = 0
5214 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode),
5215 MVT::i1);
5216 else if (Op1Set)
5217 // xor(1, y) -> nor(y, y)
5218 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5219 MVT::i1, MachineNode->getOperand(1),
5220 MachineNode->getOperand(1));
5221 else if (Op2Set)
5222 // xor(x, 1) -> nor(x, x)
5223 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5224 MVT::i1, MachineNode->getOperand(0),
5225 MachineNode->getOperand(0));
5226 else if (Op1Unset)
5227 // xor(0, y) = y
5228 ResNode = MachineNode->getOperand(1).getNode();
5229 else if (Op2Unset)
5230 // xor(x, 0) = x
5231 ResNode = MachineNode->getOperand(0).getNode();
5232 else if (Op1Not)
5233 // xor(~x, y) = eqv(x, y)
5234 ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode),
5235 MVT::i1, MachineNode->getOperand(0).
5236 getOperand(0),
5237 MachineNode->getOperand(1));
5238 else if (Op2Not)
5239 // xor(x, ~y) = eqv(x, y)
5240 ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode),
5241 MVT::i1, MachineNode->getOperand(0),
5242 MachineNode->getOperand(1).
5243 getOperand(0));
5244 else if (AllUsersSelectZero(MachineNode)) {
5245 ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode),
5246 MVT::i1, MachineNode->getOperand(0),
5247 MachineNode->getOperand(1));
5248 SelectSwap = true;
5250 break;
5251 case PPC::CRNOR:
5252 if (Op1Set || Op2Set)
5253 // nor(1, y) -> 0
5254 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode),
5255 MVT::i1);
5256 else if (Op1Unset)
5257 // nor(0, y) = ~y -> nor(y, y)
5258 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5259 MVT::i1, MachineNode->getOperand(1),
5260 MachineNode->getOperand(1));
5261 else if (Op2Unset)
5262 // nor(x, 0) = ~x
5263 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5264 MVT::i1, MachineNode->getOperand(0),
5265 MachineNode->getOperand(0));
5266 else if (Op1Not)
5267 // nor(~x, y) = andc(x, y)
5268 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode),
5269 MVT::i1, MachineNode->getOperand(0).
5270 getOperand(0),
5271 MachineNode->getOperand(1));
5272 else if (Op2Not)
5273 // nor(x, ~y) = andc(y, x)
5274 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode),
5275 MVT::i1, MachineNode->getOperand(1).
5276 getOperand(0),
5277 MachineNode->getOperand(0));
5278 else if (AllUsersSelectZero(MachineNode)) {
5279 ResNode = CurDAG->getMachineNode(PPC::CROR, SDLoc(MachineNode),
5280 MVT::i1, MachineNode->getOperand(0),
5281 MachineNode->getOperand(1));
5282 SelectSwap = true;
5284 break;
5285 case PPC::CREQV:
5286 if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
5287 // eqv(x, x) = 1
5288 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode),
5289 MVT::i1);
5290 else if (Op1Set)
5291 // eqv(1, y) = y
5292 ResNode = MachineNode->getOperand(1).getNode();
5293 else if (Op2Set)
5294 // eqv(x, 1) = x
5295 ResNode = MachineNode->getOperand(0).getNode();
5296 else if (Op1Unset)
5297 // eqv(0, y) = ~y -> nor(y, y)
5298 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5299 MVT::i1, MachineNode->getOperand(1),
5300 MachineNode->getOperand(1));
5301 else if (Op2Unset)
5302 // eqv(x, 0) = ~x
5303 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5304 MVT::i1, MachineNode->getOperand(0),
5305 MachineNode->getOperand(0));
5306 else if (Op1Not)
5307 // eqv(~x, y) = xor(x, y)
5308 ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode),
5309 MVT::i1, MachineNode->getOperand(0).
5310 getOperand(0),
5311 MachineNode->getOperand(1));
5312 else if (Op2Not)
5313 // eqv(x, ~y) = xor(x, y)
5314 ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode),
5315 MVT::i1, MachineNode->getOperand(0),
5316 MachineNode->getOperand(1).
5317 getOperand(0));
5318 else if (AllUsersSelectZero(MachineNode)) {
5319 ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode),
5320 MVT::i1, MachineNode->getOperand(0),
5321 MachineNode->getOperand(1));
5322 SelectSwap = true;
5324 break;
5325 case PPC::CRANDC:
5326 if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
5327 // andc(x, x) = 0
5328 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode),
5329 MVT::i1);
5330 else if (Op1Set)
5331 // andc(1, y) = ~y
5332 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5333 MVT::i1, MachineNode->getOperand(1),
5334 MachineNode->getOperand(1));
5335 else if (Op1Unset || Op2Set)
5336 // andc(0, y) = andc(x, 1) = 0
5337 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode),
5338 MVT::i1);
5339 else if (Op2Unset)
5340 // andc(x, 0) = x
5341 ResNode = MachineNode->getOperand(0).getNode();
5342 else if (Op1Not)
5343 // andc(~x, y) = ~(x | y) = nor(x, y)
5344 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5345 MVT::i1, MachineNode->getOperand(0).
5346 getOperand(0),
5347 MachineNode->getOperand(1));
5348 else if (Op2Not)
5349 // andc(x, ~y) = x & y
5350 ResNode = CurDAG->getMachineNode(PPC::CRAND, SDLoc(MachineNode),
5351 MVT::i1, MachineNode->getOperand(0),
5352 MachineNode->getOperand(1).
5353 getOperand(0));
5354 else if (AllUsersSelectZero(MachineNode)) {
5355 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode),
5356 MVT::i1, MachineNode->getOperand(1),
5357 MachineNode->getOperand(0));
5358 SelectSwap = true;
5360 break;
5361 case PPC::CRORC:
5362 if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
5363 // orc(x, x) = 1
5364 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode),
5365 MVT::i1);
5366 else if (Op1Set || Op2Unset)
5367 // orc(1, y) = orc(x, 0) = 1
5368 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode),
5369 MVT::i1);
5370 else if (Op2Set)
5371 // orc(x, 1) = x
5372 ResNode = MachineNode->getOperand(0).getNode();
5373 else if (Op1Unset)
5374 // orc(0, y) = ~y
5375 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
5376 MVT::i1, MachineNode->getOperand(1),
5377 MachineNode->getOperand(1));
5378 else if (Op1Not)
5379 // orc(~x, y) = ~(x & y) = nand(x, y)
5380 ResNode = CurDAG->getMachineNode(PPC::CRNAND, SDLoc(MachineNode),
5381 MVT::i1, MachineNode->getOperand(0).
5382 getOperand(0),
5383 MachineNode->getOperand(1));
5384 else if (Op2Not)
5385 // orc(x, ~y) = x | y
5386 ResNode = CurDAG->getMachineNode(PPC::CROR, SDLoc(MachineNode),
5387 MVT::i1, MachineNode->getOperand(0),
5388 MachineNode->getOperand(1).
5389 getOperand(0));
5390 else if (AllUsersSelectZero(MachineNode)) {
5391 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode),
5392 MVT::i1, MachineNode->getOperand(1),
5393 MachineNode->getOperand(0));
5394 SelectSwap = true;
5396 break;
5397 case PPC::SELECT_I4:
5398 case PPC::SELECT_I8:
5399 case PPC::SELECT_F4:
5400 case PPC::SELECT_F8:
5401 case PPC::SELECT_QFRC:
5402 case PPC::SELECT_QSRC:
5403 case PPC::SELECT_QBRC:
5404 case PPC::SELECT_VRRC:
5405 case PPC::SELECT_VSFRC:
5406 case PPC::SELECT_VSSRC:
5407 case PPC::SELECT_VSRC:
5408 if (Op1Set)
5409 ResNode = MachineNode->getOperand(1).getNode();
5410 else if (Op1Unset)
5411 ResNode = MachineNode->getOperand(2).getNode();
5412 else if (Op1Not)
5413 ResNode = CurDAG->getMachineNode(MachineNode->getMachineOpcode(),
5414 SDLoc(MachineNode),
5415 MachineNode->getValueType(0),
5416 MachineNode->getOperand(0).
5417 getOperand(0),
5418 MachineNode->getOperand(2),
5419 MachineNode->getOperand(1));
5420 break;
5421 case PPC::BC:
5422 case PPC::BCn:
5423 if (Op1Not)
5424 ResNode = CurDAG->getMachineNode(Opcode == PPC::BC ? PPC::BCn :
5425 PPC::BC,
5426 SDLoc(MachineNode),
5427 MVT::Other,
5428 MachineNode->getOperand(0).
5429 getOperand(0),
5430 MachineNode->getOperand(1),
5431 MachineNode->getOperand(2));
5432 // FIXME: Handle Op1Set, Op1Unset here too.
5433 break;
5436 // If we're inverting this node because it is used only by selects that
5437 // we'd like to swap, then swap the selects before the node replacement.
5438 if (SelectSwap)
5439 SwapAllSelectUsers(MachineNode);
5441 if (ResNode != MachineNode) {
5442 DEBUG(dbgs() << "CR Peephole replacing:\nOld: ");
5443 DEBUG(MachineNode->dump(CurDAG));
5444 DEBUG(dbgs() << "\nNew: ");
5445 DEBUG(ResNode->dump(CurDAG));
5446 DEBUG(dbgs() << "\n");
5448 ReplaceUses(MachineNode, ResNode);
5449 IsModified = true;
5452 if (IsModified)
5453 CurDAG->RemoveDeadNodes();
5454 } while (IsModified);
5457 // Gather the set of 32-bit operations that are known to have their
5458 // higher-order 32 bits zero, where ToPromote contains all such operations.
5459 static bool PeepholePPC64ZExtGather(SDValue Op32,
5460 SmallPtrSetImpl<SDNode *> &ToPromote) {
5461 if (!Op32.isMachineOpcode())
5462 return false;
5464 // First, check for the "frontier" instructions (those that will clear the
5465 // higher-order 32 bits).
5467 // For RLWINM and RLWNM, we need to make sure that the mask does not wrap
5468 // around. If it does not, then these instructions will clear the
5469 // higher-order bits.
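// For example (illustrative): RLWINM with MB = 24 and ME = 31 keeps only the
// low byte, a mask confined to the low 32 bits, so the upper word of the
// result is zero; a wrapping mask such as MB = 28, ME = 3 could set bits in
// the upper word, which is why the MB <= ME check below rejects it.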
5470 if ((Op32.getMachineOpcode() == PPC::RLWINM ||
5471 Op32.getMachineOpcode() == PPC::RLWNM) &&
5472 Op32.getConstantOperandVal(2) <= Op32.getConstantOperandVal(3)) {
5473 ToPromote.insert(Op32.getNode());
5474 return true;
5477 // SLW and SRW always clear the higher-order bits.
5478 if (Op32.getMachineOpcode() == PPC::SLW ||
5479 Op32.getMachineOpcode() == PPC::SRW) {
5480 ToPromote.insert(Op32.getNode());
5481 return true;
5484 // For LI and LIS, we need the immediate to be positive (so that it is not
5485 // sign extended).
5486 if (Op32.getMachineOpcode() == PPC::LI ||
5487 Op32.getMachineOpcode() == PPC::LIS) {
5488 if (!isUInt<15>(Op32.getConstantOperandVal(0)))
5489 return false;
5491 ToPromote.insert(Op32.getNode());
5492 return true;
5495 // LHBRX and LWBRX always clear the higher-order bits.
5496 if (Op32.getMachineOpcode() == PPC::LHBRX ||
5497 Op32.getMachineOpcode() == PPC::LWBRX) {
5498 ToPromote.insert(Op32.getNode());
5499 return true;
5502 // CNT[LT]ZW always produces a value in [0,32], so the higher-order bits are zero.
5503 if (Op32.getMachineOpcode() == PPC::CNTLZW ||
5504 Op32.getMachineOpcode() == PPC::CNTTZW) {
5505 ToPromote.insert(Op32.getNode());
5506 return true;
5509 // Next, check for those instructions we can look through.
5511 // Assuming the mask does not wrap around, the higher-order bits are
5512 // taken directly from the first operand.
5513 if (Op32.getMachineOpcode() == PPC::RLWIMI &&
5514 Op32.getConstantOperandVal(3) <= Op32.getConstantOperandVal(4)) {
5515 SmallPtrSet<SDNode *, 16> ToPromote1;
5516 if (!PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1))
5517 return false;
5519 ToPromote.insert(Op32.getNode());
5520 ToPromote.insert(ToPromote1.begin(), ToPromote1.end());
5521 return true;
5524 // For OR, the higher-order bits are zero if that is true for both operands.
5525 // For SELECT_I4, the same is true (but the relevant operand numbers are
5526 // shifted by 1).
5527 if (Op32.getMachineOpcode() == PPC::OR ||
5528 Op32.getMachineOpcode() == PPC::SELECT_I4) {
5529 unsigned B = Op32.getMachineOpcode() == PPC::SELECT_I4 ? 1 : 0;
5530 SmallPtrSet<SDNode *, 16> ToPromote1;
5531 if (!PeepholePPC64ZExtGather(Op32.getOperand(B+0), ToPromote1))
5532 return false;
5533 if (!PeepholePPC64ZExtGather(Op32.getOperand(B+1), ToPromote1))
5534 return false;
5536 ToPromote.insert(Op32.getNode());
5537 ToPromote.insert(ToPromote1.begin(), ToPromote1.end());
5538 return true;
5541 // For ORI and ORIS, we need the higher-order bits of the first operand to be
5542 // zero, and also for the constant to be positive (so that it is not sign
5543 // extended).
5544 if (Op32.getMachineOpcode() == PPC::ORI ||
5545 Op32.getMachineOpcode() == PPC::ORIS) {
5546 SmallPtrSet<SDNode *, 16> ToPromote1;
5547 if (!PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1))
5548 return false;
5549 if (!isUInt<15>(Op32.getConstantOperandVal(1)))
5550 return false;
5552 ToPromote.insert(Op32.getNode());
5553 ToPromote.insert(ToPromote1.begin(), ToPromote1.end());
5554 return true;
5557 // The higher-order bits of AND are zero if that is true for at least one of
5558 // the operands.
5559 if (Op32.getMachineOpcode() == PPC::AND) {
5560 SmallPtrSet<SDNode *, 16> ToPromote1, ToPromote2;
5561 bool Op0OK =
5562 PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1);
5563 bool Op1OK =
5564 PeepholePPC64ZExtGather(Op32.getOperand(1), ToPromote2);
5565 if (!Op0OK && !Op1OK)
5566 return false;
5568 ToPromote.insert(Op32.getNode());
5570 if (Op0OK)
5571 ToPromote.insert(ToPromote1.begin(), ToPromote1.end());
5573 if (Op1OK)
5574 ToPromote.insert(ToPromote2.begin(), ToPromote2.end());
5576 return true;
5579 // For ANDI and ANDIS, the higher-order bits are zero if either that is true
5580 // of the first operand, or if the second operand is positive (so that it is
5581 // not sign extended).
5582 if (Op32.getMachineOpcode() == PPC::ANDIo ||
5583 Op32.getMachineOpcode() == PPC::ANDISo) {
5584 SmallPtrSet<SDNode *, 16> ToPromote1;
5585 bool Op0OK =
5586 PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1);
5587 bool Op1OK = isUInt<15>(Op32.getConstantOperandVal(1));
5588 if (!Op0OK && !Op1OK)
5589 return false;
5591 ToPromote.insert(Op32.getNode());
5593 if (Op0OK)
5594 ToPromote.insert(ToPromote1.begin(), ToPromote1.end());
5596 return true;
5599 return false;
5602 void PPCDAGToDAGISel::PeepholePPC64ZExt() {
5603 if (!PPCSubTarget->isPPC64())
5604 return;
5606 // When we zero-extend from i32 to i64, we use a pattern like this:
5607 // def : Pat<(i64 (zext i32:$in)),
5608 // (RLDICL (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $in, sub_32),
5609 // 0, 32)>;
5610 // There are several 32-bit shift/rotate instructions, however, that will
5611 // clear the higher-order bits of their output, rendering the RLDICL
5612 // unnecessary. When that happens, we remove it here, and redefine the
5613 // relevant 32-bit operation to be a 64-bit operation.
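// Illustrative sketch: (i64 (zext (i32 (srw ...)))) selects to
//   (RLDICL (INSERT_SUBREG (IMPLICIT_DEF), (SRW ...), sub_32), 0, 32)
// Because SRW already leaves the upper 32 bits zero, we can morph the SRW
// into SRW8 (wrapping any remaining i32 operands in INSERT_SUBREGs) and then
// replace all uses of the RLDICL with the promoted node directly.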
5615 SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
5616 ++Position;
5618 bool MadeChange = false;
5619 while (Position != CurDAG->allnodes_begin()) {
5620 SDNode *N = &*--Position;
5621 // Skip dead nodes and any non-machine opcodes.
5622 if (N->use_empty() || !N->isMachineOpcode())
5623 continue;
5625 if (N->getMachineOpcode() != PPC::RLDICL)
5626 continue;
5628 if (N->getConstantOperandVal(1) != 0 ||
5629 N->getConstantOperandVal(2) != 32)
5630 continue;
5632 SDValue ISR = N->getOperand(0);
5633 if (!ISR.isMachineOpcode() ||
5634 ISR.getMachineOpcode() != TargetOpcode::INSERT_SUBREG)
5635 continue;
5637 if (!ISR.hasOneUse())
5638 continue;
5640 if (ISR.getConstantOperandVal(2) != PPC::sub_32)
5641 continue;
5643 SDValue IDef = ISR.getOperand(0);
5644 if (!IDef.isMachineOpcode() ||
5645 IDef.getMachineOpcode() != TargetOpcode::IMPLICIT_DEF)
5646 continue;
5648 // We now know that we're looking at a canonical i32 -> i64 zext. See if we
5649 // can get rid of it.
5651 SDValue Op32 = ISR->getOperand(1);
5652 if (!Op32.isMachineOpcode())
5653 continue;
5655 // There are some 32-bit instructions that always clear the high-order 32
5656 // bits; there are also some instructions (like AND) that we can look
5657 // through.
5658 SmallPtrSet<SDNode *, 16> ToPromote;
5659 if (!PeepholePPC64ZExtGather(Op32, ToPromote))
5660 continue;
5662 // If the ToPromote set contains nodes that have uses outside of the set
5663 // (except for the original INSERT_SUBREG), then abort the transformation.
5664 bool OutsideUse = false;
5665 for (SDNode *PN : ToPromote) {
5666 for (SDNode *UN : PN->uses()) {
5667 if (!ToPromote.count(UN) && UN != ISR.getNode()) {
5668 OutsideUse = true;
5669 break;
5673 if (OutsideUse)
5674 break;
5676 if (OutsideUse)
5677 continue;
5679 MadeChange = true;
5681 // We now know that this zero extension can be removed by promoting the
5682 // nodes in ToPromote to 64-bit operations, where for operations in the
5683 // frontier of the set, we need to insert INSERT_SUBREGs for their
5684 // operands.
5685 for (SDNode *PN : ToPromote) {
5686 unsigned NewOpcode;
5687 switch (PN->getMachineOpcode()) {
5688 default:
5689 llvm_unreachable("Don't know the 64-bit variant of this instruction");
5690 case PPC::RLWINM: NewOpcode = PPC::RLWINM8; break;
5691 case PPC::RLWNM: NewOpcode = PPC::RLWNM8; break;
5692 case PPC::SLW: NewOpcode = PPC::SLW8; break;
5693 case PPC::SRW: NewOpcode = PPC::SRW8; break;
5694 case PPC::LI: NewOpcode = PPC::LI8; break;
5695 case PPC::LIS: NewOpcode = PPC::LIS8; break;
5696 case PPC::LHBRX: NewOpcode = PPC::LHBRX8; break;
5697 case PPC::LWBRX: NewOpcode = PPC::LWBRX8; break;
5698 case PPC::CNTLZW: NewOpcode = PPC::CNTLZW8; break;
5699 case PPC::CNTTZW: NewOpcode = PPC::CNTTZW8; break;
5700 case PPC::RLWIMI: NewOpcode = PPC::RLWIMI8; break;
5701 case PPC::OR: NewOpcode = PPC::OR8; break;
5702 case PPC::SELECT_I4: NewOpcode = PPC::SELECT_I8; break;
5703 case PPC::ORI: NewOpcode = PPC::ORI8; break;
5704 case PPC::ORIS: NewOpcode = PPC::ORIS8; break;
5705 case PPC::AND: NewOpcode = PPC::AND8; break;
5706 case PPC::ANDIo: NewOpcode = PPC::ANDIo8; break;
5707 case PPC::ANDISo: NewOpcode = PPC::ANDISo8; break;
5710 // Note: During the replacement process, the nodes will be in an
5711 // inconsistent state (some instructions will have operands with values
5712 // of the wrong type). Once done, however, everything should be right
5713 // again.
5715 SmallVector<SDValue, 4> Ops;
5716 for (const SDValue &V : PN->ops()) {
5717 if (!ToPromote.count(V.getNode()) && V.getValueType() == MVT::i32 &&
5718 !isa<ConstantSDNode>(V)) {
5719 SDValue ReplOpOps[] = { ISR.getOperand(0), V, ISR.getOperand(2) };
5720 SDNode *ReplOp =
5721 CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, SDLoc(V),
5722 ISR.getNode()->getVTList(), ReplOpOps);
5723 Ops.push_back(SDValue(ReplOp, 0));
5724 } else {
5725 Ops.push_back(V);
5729 // Because all to-be-promoted nodes only have users that are other
5730 // promoted nodes (or the original INSERT_SUBREG), we can safely replace
5731 // the i32 result value type with i64.
5733 SmallVector<EVT, 2> NewVTs;
5734 SDVTList VTs = PN->getVTList();
5735 for (unsigned i = 0, ie = VTs.NumVTs; i != ie; ++i)
5736 if (VTs.VTs[i] == MVT::i32)
5737 NewVTs.push_back(MVT::i64);
5738 else
5739 NewVTs.push_back(VTs.VTs[i]);
5741 DEBUG(dbgs() << "PPC64 ZExt Peephole morphing:\nOld: ");
5742 DEBUG(PN->dump(CurDAG));
5744 CurDAG->SelectNodeTo(PN, NewOpcode, CurDAG->getVTList(NewVTs), Ops);
5746 DEBUG(dbgs() << "\nNew: ");
5747 DEBUG(PN->dump(CurDAG));
5748 DEBUG(dbgs() << "\n");
5751 // Now we replace the original zero extend and its associated INSERT_SUBREG
5752 // with the value feeding the INSERT_SUBREG (which has now been promoted to
5753 // return an i64).
5755 DEBUG(dbgs() << "PPC64 ZExt Peephole replacing:\nOld: ");
5756 DEBUG(N->dump(CurDAG));
5757 DEBUG(dbgs() << "\nNew: ");
5758 DEBUG(Op32.getNode()->dump(CurDAG));
5759 DEBUG(dbgs() << "\n");
5761 ReplaceUses(N, Op32.getNode());
5764 if (MadeChange)
5765 CurDAG->RemoveDeadNodes();
5768 void PPCDAGToDAGISel::PeepholePPC64() {
5769 // These optimizations are currently supported only for 64-bit SVR4.
5770 if (PPCSubTarget->isDarwin() || !PPCSubTarget->isPPC64())
5771 return;
5773 SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
5774 ++Position;
5776 while (Position != CurDAG->allnodes_begin()) {
5777 SDNode *N = &*--Position;
5778 // Skip dead nodes and any non-machine opcodes.
5779 if (N->use_empty() || !N->isMachineOpcode())
5780 continue;
5782 unsigned FirstOp;
5783 unsigned StorageOpcode = N->getMachineOpcode();
5785 switch (StorageOpcode) {
5786 default: continue;
5788 case PPC::LBZ:
5789 case PPC::LBZ8:
5790 case PPC::LD:
5791 case PPC::LFD:
5792 case PPC::LFS:
5793 case PPC::LHA:
5794 case PPC::LHA8:
5795 case PPC::LHZ:
5796 case PPC::LHZ8:
5797 case PPC::LWA:
5798 case PPC::LWZ:
5799 case PPC::LWZ8:
5800 FirstOp = 0;
5801 break;
5803 case PPC::STB:
5804 case PPC::STB8:
5805 case PPC::STD:
5806 case PPC::STFD:
5807 case PPC::STFS:
5808 case PPC::STH:
5809 case PPC::STH8:
5810 case PPC::STW:
5811 case PPC::STW8:
5812 FirstOp = 1;
5813 break;
5816 // If this is a load or store with a zero offset, or an offset within the base's alignment,
5817 // we may be able to fold an add-immediate into the memory operation.
5818 // The check against alignment is below, as it can't occur until we check
5819 // the arguments to N.
5820 if (!isa<ConstantSDNode>(N->getOperand(FirstOp)))
5821 continue;
5823 SDValue Base = N->getOperand(FirstOp + 1);
5824 if (!Base.isMachineOpcode())
5825 continue;
5827 unsigned Flags = 0;
5828 bool ReplaceFlags = true;
5830 // When the feeding operation is an add-immediate of some sort,
5831 // determine whether we need to add relocation information to the
5832 // target flags on the immediate operand when we fold it into the
5833 // load instruction.
5835 // For something like ADDItocL, the relocation information is
5836 // inferred from the opcode; when we process it in the AsmPrinter,
5837 // we add the necessary relocation there. A load, though, can receive
5838 // relocation from various flavors of ADDIxxx, so we need to carry
5839 // the relocation information in the target flags.
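// Illustrative sketch (operand order as used below): a TOC-relative access
//   (LD 0, (ADDItocL %x, @glob))
// can be folded into
//   (LD @glob<MO_TOC_LO>, %x)
// which the AsmPrinter can then emit with a @toc@l relocation on the
// displacement.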
5840 switch (Base.getMachineOpcode()) {
5841 default: continue;
5843 case PPC::ADDI8:
5844 case PPC::ADDI:
5845 // In some cases (such as TLS) the relocation information
5846 // is already in place on the operand, so copying the operand
5847 // is sufficient.
5848 ReplaceFlags = false;
5849 // For these cases, the immediate may not be divisible by 4, in
5850 // which case the fold is illegal for DS-form instructions. (The
5851 // other cases provide aligned addresses and are always safe.)
5852 if ((StorageOpcode == PPC::LWA ||
5853 StorageOpcode == PPC::LD ||
5854 StorageOpcode == PPC::STD) &&
5855 (!isa<ConstantSDNode>(Base.getOperand(1)) ||
5856 Base.getConstantOperandVal(1) % 4 != 0))
5857 continue;
5858 break;
5859 case PPC::ADDIdtprelL:
5860 Flags = PPCII::MO_DTPREL_LO;
5861 break;
5862 case PPC::ADDItlsldL:
5863 Flags = PPCII::MO_TLSLD_LO;
5864 break;
5865 case PPC::ADDItocL:
5866 Flags = PPCII::MO_TOC_LO;
5867 break;
5870 SDValue ImmOpnd = Base.getOperand(1);
5872 // On PPC64, the TOC base pointer is guaranteed by the ABI only to have
5873 // 8-byte alignment, and so we can only use offsets less than 8 (otherwise,
5874 // we might have needed different @ha relocation values for the offset
5875 // pointers).
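// (For example, a global known only to be 4-byte aligned limits the directly
// foldable offset to the range [0, 3].)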
5876 int MaxDisplacement = 7;
5877 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(ImmOpnd)) {
5878 const GlobalValue *GV = GA->getGlobal();
5879 MaxDisplacement = std::min((int) GV->getAlignment() - 1, MaxDisplacement);
5882 bool UpdateHBase = false;
5883 SDValue HBase = Base.getOperand(0);
5885 int Offset = N->getConstantOperandVal(FirstOp);
5886 if (ReplaceFlags) {
5887 if (Offset < 0 || Offset > MaxDisplacement) {
5888 // If we have an addi(toc@l)/addis(toc@ha) pair, and the addis has only
5889 // one use, then we can do this for any offset, we just need to also
5890 // update the offset (i.e. the symbol addend) on the addis also.
5891 if (Base.getMachineOpcode() != PPC::ADDItocL)
5892 continue;
5894 if (!HBase.isMachineOpcode() ||
5895 HBase.getMachineOpcode() != PPC::ADDIStocHA)
5896 continue;
5898 if (!Base.hasOneUse() || !HBase.hasOneUse())
5899 continue;
5901 SDValue HImmOpnd = HBase.getOperand(1);
5902 if (HImmOpnd != ImmOpnd)
5903 continue;
5905 UpdateHBase = true;
5907 } else {
5908 // If we're directly folding the addend from an addi instruction, then:
5909 // 1. In general, the offset on the memory access must be zero.
5910 // 2. If the addend is a constant, then it can be combined with a
5911 // non-zero offset, but only if the result meets the encoding
5912 // requirements.
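// Illustrative sketch: (LWZ 8, (ADDI %r, 16)) can become (LWZ 24, %r), since
// the combined displacement 24 still fits in a signed 16-bit immediate (and,
// for DS-form instructions like LD/STD/LWA, would also need to remain a
// multiple of 4).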
5913 if (auto *C = dyn_cast<ConstantSDNode>(ImmOpnd)) {
5914 Offset += C->getSExtValue();
5916 if ((StorageOpcode == PPC::LWA || StorageOpcode == PPC::LD ||
5917 StorageOpcode == PPC::STD) && (Offset % 4) != 0)
5918 continue;
5920 if (!isInt<16>(Offset))
5921 continue;
5923 ImmOpnd = CurDAG->getTargetConstant(Offset, SDLoc(ImmOpnd),
5924 ImmOpnd.getValueType());
5925 } else if (Offset != 0) {
5926 continue;
5930 // We found an opportunity. Reverse the operands from the add
5931 // immediate and substitute them into the load or store. If
5932 // needed, update the target flags for the immediate operand to
5933 // reflect the necessary relocation information.
5934 DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
5935 DEBUG(Base->dump(CurDAG));
5936 DEBUG(dbgs() << "\nN: ");
5937 DEBUG(N->dump(CurDAG));
5938 DEBUG(dbgs() << "\n");
5940 // If the relocation information isn't already present on the
5941 // immediate operand, add it now.
5942 if (ReplaceFlags) {
5943 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(ImmOpnd)) {
5944 SDLoc dl(GA);
5945 const GlobalValue *GV = GA->getGlobal();
5946 // We can't perform this optimization for data whose alignment
5947 // is insufficient for the instruction encoding.
5948 if (GV->getAlignment() < 4 &&
5949 (StorageOpcode == PPC::LD || StorageOpcode == PPC::STD ||
5950 StorageOpcode == PPC::LWA || (Offset % 4) != 0)) {
5951 DEBUG(dbgs() << "Rejected this candidate for alignment.\n\n");
5952 continue;
5954 ImmOpnd = CurDAG->getTargetGlobalAddress(GV, dl, MVT::i64, Offset, Flags);
5955 } else if (ConstantPoolSDNode *CP =
5956 dyn_cast<ConstantPoolSDNode>(ImmOpnd)) {
5957 const Constant *C = CP->getConstVal();
5958 ImmOpnd = CurDAG->getTargetConstantPool(C, MVT::i64,
5959 CP->getAlignment(),
5960 Offset, Flags);
5964 if (FirstOp == 1) // Store
5965 (void)CurDAG->UpdateNodeOperands(N, N->getOperand(0), ImmOpnd,
5966 Base.getOperand(0), N->getOperand(3));
5967 else // Load
5968 (void)CurDAG->UpdateNodeOperands(N, ImmOpnd, Base.getOperand(0),
5969 N->getOperand(2));
5971 if (UpdateHBase)
5972 (void)CurDAG->UpdateNodeOperands(HBase.getNode(), HBase.getOperand(0),
5973 ImmOpnd);
5975 // The add-immediate may now be dead, in which case remove it.
5976 if (Base.getNode()->use_empty())
5977 CurDAG->RemoveDeadNode(Base.getNode());
5981 /// createPPCISelDag - This pass converts a legalized DAG into a
5982 /// PowerPC-specific DAG, ready for instruction scheduling.
5984 FunctionPass *llvm::createPPCISelDag(PPCTargetMachine &TM,
5985 CodeGenOpt::Level OptLevel) {
5986 return new PPCDAGToDAGISel(TM, OptLevel);