//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMAddressingModes.h"
#include "ARMTargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
  cl::desc("Disable isel of shifter-op"),
  cl::init(false));

static cl::opt<bool>
CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
  cl::desc("Check fp vmla / vmls hazard at isel time"),
  cl::init(true));

//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
namespace {

enum AddrMode2Type {
  AM2_BASE, // Simple AM2 (+-imm12)
  AM2_SHOP  // Shifter-op AM2
};
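
// For illustration: "ldr r0, [r1, #+8]" uses the AM2_BASE form, while
// "ldr r0, [r1, +r2, lsl #2]" uses the AM2_SHOP (shifter-op) form.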
class ARMDAGToDAGISel : public SelectionDAGISel {
  ARMBaseTargetMachine &TM;
  const ARMBaseInstrInfo *TII;

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

public:
  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
                           CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      TII(static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo())),
      Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
  }

  virtual const char *getPassName() const {
    return "ARM Instruction Selection";
  }

  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  SDNode *Select(SDNode *N);

  bool hasNoVMLxHazardUse(SDNode *N) const;
  bool isShifterOpProfitable(const SDValue &Shift,
                             ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
  bool SelectShifterOperandReg(SDValue N, SDValue &A,
                               SDValue &B, SDValue &C,
                               bool CheckProfitability = true);
  bool SelectShiftShifterOperandReg(SDValue N, SDValue &A,
                                    SDValue &B, SDValue &C) {
    // Don't apply the profitability check
    return SelectShifterOperandReg(N, A, B, C, false);
  }

  bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);

  AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
                                      SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
  }

  bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
  }

  bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
                       SDValue &Opc) {
    SelectAddrMode2Worker(N, Base, Offset, Opc);
    //    return SelectAddrMode2ShOp(N, Base, Offset, Opc);
    // This always matches one way or another.
    return true;
  }

  bool SelectAddrMode2Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3(SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode5(SDValue N, SDValue &Base,
                       SDValue &Offset);
  bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
  bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);

  bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);

  // Thumb Addressing Modes:
  bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
                             unsigned Scale);
  bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
                                SDValue &OffImm);
  bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);

  // Thumb 2 Addressing Modes:
  bool SelectT2ShifterOperandReg(SDValue N,
                                 SDValue &BaseReg, SDValue &Opc);
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                  SDValue &OffImm);
  bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);

  inline bool is_so_imm(unsigned Imm) const {
    return ARM_AM::getSOImmVal(Imm) != -1;
  }

  inline bool is_so_imm_not(unsigned Imm) const {
    return ARM_AM::getSOImmVal(~Imm) != -1;
  }

  inline bool is_t2_so_imm(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  }

  inline bool is_t2_so_imm_not(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(~Imm) != -1;
  }

  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"

private:
  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  /// ARM.
  SDNode *SelectARMIndexedLoad(SDNode *N);
  SDNode *SelectT2IndexedLoad(SDNode *N);

  /// SelectVLD - Select NEON load intrinsics.  NumVecs should be
  /// 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                    unsigned *DOpcodes,
                    unsigned *QOpcodes0, unsigned *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics.  NumVecs should
  /// be 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                    unsigned *DOpcodes,
                    unsigned *QOpcodes0, unsigned *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics.  NumVecs should
  /// be 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// load/store of D registers and Q registers.
  SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
                          bool isUpdating, unsigned NumVecs,
                          unsigned *DOpcodes, unsigned *QOpcodes);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics.  NumVecs
  /// should be 2, 3 or 4.  The opcode array specifies the instructions used
  /// for loading D registers.  (Q registers are not supported.)
  SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
                       unsigned *Opcodes);

  /// SelectVTBL - Select NEON VTBL and VTBX intrinsics.  NumVecs should be 2,
  /// 3 or 4.  These are custom-selected so that a REG_SEQUENCE can be
  /// generated to force the table registers to be consecutive.
  SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);

  /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
  SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  /// SelectCMOVOp - Select CMOV instructions for ARM.
  SDNode *SelectCMOVOp(SDNode *N);
  SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                              ARMCC::CondCodes CCVal, SDValue CCR,
                              SDValue InFlag);
  SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                               ARMCC::CondCodes CCVal, SDValue CCR,
                               SDValue InFlag);
  SDNode *SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                            ARMCC::CondCodes CCVal, SDValue CCR,
                            SDValue InFlag);
  SDNode *SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                             ARMCC::CondCodes CCVal, SDValue CCR,
                             SDValue InFlag);

  SDNode *SelectConcatVector(SDNode *N);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);

  // Form pairs of consecutive S, D, or Q registers.
  SDNode *PairSRegs(EVT VT, SDValue V0, SDValue V1);
  SDNode *PairDRegs(EVT VT, SDValue V0, SDValue V1);
  SDNode *PairQRegs(EVT VT, SDValue V0, SDValue V1);

  // Form sequences of 4 consecutive S, D, or Q registers.
  SDNode *QuadSRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *QuadDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *QuadQRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);

  // Get the alignment operand for a NEON VLD or VST instruction.
  SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
};
}
/// isInt32Immediate - This method tests to see if the node is a 32-bit
/// constant operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the operand is a 32-bit
// constant. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc &&
         isInt32Immediate(N->getOperand(1).getNode(), Imm);
}
/// \brief Check whether a particular node is a constant value representable as
/// (N * Scale) where N is in [\arg RangeMin, \arg RangeMax).
///
/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
static bool isScaledConstantInRange(SDValue Node, unsigned Scale,
                                    int RangeMin, int RangeMax,
                                    int &ScaledConstant) {
  assert(Scale && "Invalid scale!");

  // Check that this is a constant.
  const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
  if (!C)
    return false;

  ScaledConstant = (int) C->getZExtValue();
  if ((ScaledConstant % Scale) != 0)
    return false;

  ScaledConstant /= Scale;
  return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
}
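
// Worked example (illustrative): Node = 48, Scale = 4, [RangeMin, RangeMax) =
// [0, 32) gives ScaledConstant = 48 / 4 = 12, which is in range, so this
// returns true; Node = 50 fails the (50 % 4) == 0 divisibility check.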
/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  if (OptLevel == CodeGenOpt::None)
    return true;

  if (!CheckVMLxHazard)
    return true;

  if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9())
    return true;

  if (!N->hasOneUse())
    return false;

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
  if (Use->isMachineOpcode()) {
    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
    if (MCID.mayStore())
      return true;
    unsigned Opcode = MCID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
      return true;
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    // vmla
    // vmla (stall 8 cycles)
    //
    // vmul (5 cycles)
    // vadd (5 cycles)
    // vmla
    // This adds up to about 18 - 19 cycles.
    //
    // vmla
    // vmul (stall 4 cycles)
    // vadd adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}
bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                            ARM_AM::ShiftOpc ShOpcVal,
                                            unsigned ShAmt) {
  if (!Subtarget->isCortexA9())
    return true;
  if (Shift.hasOneUse())
    return true;
  // R << 2 is free.
  return ShOpcVal == ARM_AM::lsl && ShAmt == 2;
}
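
// For illustration: on Cortex-A9 a shift with multiple uses is only folded
// into the addressing mode when it is "lsl #2" (free in the pipeline there),
// e.g. the "r2, lsl #2" in "ldr r0, [r1, r2, lsl #2]".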
bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &ShReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShReg = CurDAG->getRegister(0, MVT::i32);
    ShImmVal = RHS->getZExtValue() & 31;
  } else {
    ShReg = N.getOperand(1);
    if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
      return false;
  }
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}
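
// For illustration: this matches the shifter operand of data-processing
// instructions such as "add r0, r1, r2, lsl #3" (immediate shift amount)
// or "add r0, r1, r2, lsl r3" (shift amount in a register).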
bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}
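
// For illustration: "ldr r0, [r1, #4095]" is the largest offset accepted
// here; a negative offset fails the unsigned imm12 test above and is
// matched base-only, with the address computed by a separate instruction.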
bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!Subtarget->isCortexA9() || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ISD::ADD.
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave simple R +/- imm12 operands for LDRi12
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) // 12 bits.
      return false;
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse())
    // Compute R +/- (R << N) and reuse it.
    return false;

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
          dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (!Subtarget->isCortexA9() ||
            (N.hasOneUse() &&
             isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt))) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}
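
// For illustration: "X * 9" used as an address is matched above as
// Base = Offset = X with an lsl #3 shifter op, so "X + X*8" folds into
// something like "ldr r0, [rX, rX, lsl #3]" with no separate multiply.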
//-----

AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
                                                     SDValue &Base,
                                                     SDValue &Offset,
                                                     SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!Subtarget->isCortexA9() || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return AM2_SHOP;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ADD.
      !CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Match simple R +/- imm12 operands.
  if (N.getOpcode() != ISD::SUB) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) { // 12 bits.
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      Offset = CurDAG->getRegister(0, MVT::i32);

      ARM_AM::AddrOpc AddSub = ARM_AM::add;
      if (RHSC < 0) {
        AddSub = ARM_AM::sub;
        RHSC = - RHSC;
      }
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
                                                        ARM_AM::no_shift),
                                      MVT::i32);
      return AM2_BASE;
    }
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse()) {
    // Compute R +/- (R << N) and reuse it.
    Base = N;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
          dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (!Subtarget->isCortexA9() ||
            (N.hasOneUse() &&
             isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt))) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return AM2_SHOP;
}
bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return true;
  }

  Offset = N;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}
bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalized to X + -C, no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                              -256 + 1, 256, RHSC)) { // 8 bits.
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
  return true;
}
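
// For illustration: addrmode3 serves the halfword and signed-byte loads and
// stores, e.g. "ldrh r0, [r1, #-2]" or "ldrsb r0, [r1, r2]"; it encodes only
// an 8-bit immediate and no shifter operand, hence the -255..255 range above.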
bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
    return true;
  }

  Offset = N;
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
  return true;
}
bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
                              -256 + 1, 256, RHSC)) {
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
                                       MVT::i32);
    return true;
  }

  Base = N;
  Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                     MVT::i32);
  return true;
}
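
// For illustration: addrmode5 serves the VFP loads and stores, e.g.
// "vldr d0, [r1, #8]"; the offset is a multiple of 4 stored pre-scaled,
// so the Scale=4 check above encodes a byte offset of 1020 as 255.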
bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                                      SDValue &Align) {
  Addr = N;

  unsigned Alignment = 0;
  if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    unsigned LSNAlign = LSN->getAlignment();
    unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
    if (LSNAlign > MemSize && MemSize > 1)
      Alignment = MemSize;
  } else {
    // All other uses of addrmode6 are for intrinsics.  For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
    Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
  }

  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
  return true;
}
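
// For illustration: a vld1 lane access whose IR alignment exceeds the memory
// size is clamped above to the access size, since that is the most a NEON
// alignment hint can usefully promise for a single-element access.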
bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset) {
  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
  ISD::MemIndexedMode AM = LdSt->getAddressingMode();
  if (AM != ISD::POST_INC)
    return false;
  Offset = N;
  if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
    if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
      Offset = CurDAG->getRegister(0, MVT::i32);
  }
  return true;
}

bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
                                       SDValue &Offset, SDValue &Label) {
  if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
    Offset = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
                                      MVT::i32);
    return true;
  }

  return false;
}
//===----------------------------------------------------------------------===//
// Thumb Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
                                            SDValue &Base, SDValue &Offset){
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
    ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
    if (!NC || !NC->isNullValue())
      return false;

    Base = Offset = N;
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
                                       SDValue &Offset, unsigned Scale) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false;  // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false;  // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Thumb does not have [sp, r] address mode.
  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP))
    return false;

  // FIXME: Why do we explicitly check for a match here and then return false?
  // Presumably to allow something else to match, but shouldn't this be
  // documented?
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
    return false;

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}
bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 1);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 2);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 4);
}
bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false;  // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false;  // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else {
      Base = N;
    }

    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP)) {
    ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
    ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
    unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
    unsigned RHSC = RHS ? RHS->getZExtValue() : 0;

    // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
    if (LHSC != 0 || RHSC != 0) return false;

    Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
    Base = N.getOperand(0);
    OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}
bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
      (LHSR && LHSR->getReg() == ARM::SP)) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}
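
// For illustration: this feeds sp-relative slots to tLDRspi / tSTRspi, e.g.
// "ldr r0, [sp, #16]"; the offset is a multiple of 4 in 0..1020 bytes, which
// is why the check above scales by 4 over [0, 256).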
//===----------------------------------------------------------------------===//
// Thumb 2 Addressing Modes
//===----------------------------------------------------------------------===//


bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
                                                SDValue &Opc) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShImmVal = RHS->getZExtValue() & 31;
    Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
    return true;
  }

  return false;
}
bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false;  // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}
bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}
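
// For illustration: Thumb-2 splits register+immediate offsets between
// t2LDRi12 (positive, up to #4095, e.g. "ldr.w r0, [r1, #4000]") and
// t2LDRi8 (negative, e.g. "ldr r0, [r1, #-100]"), hence the [-255, 0)
// window accepted here.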
bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm){
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  int RHSC;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
    OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
      ? CurDAG->getTargetConstant(RHSC, MVT::i32)
      : CurDAG->getTargetConstant(-RHSC, MVT::i32);
    return true;
  }

  return false;
}
bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
                                            SDValue &Base,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse()) {
    // Compute R + (R << [1,2,3]) and reuse it.
    Base = N;
    return false;
  }

  // Look for (R + R) or (R + (R << [1,2,3])).
  unsigned ShAmt = 0;
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg);
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base);
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);
  }

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
        OffReg = OffReg.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);

  return true;
}
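
// For illustration: the form matched here is "ldr.w r0, [r1, r2, lsl #2]";
// Thumb-2 register-offset addressing only encodes lsl with a shift amount
// of 0-3, hence the ShAmt < 4 restriction above.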
//===--------------------------------------------------------------------===//

/// getAL - Returns an ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG) {
  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
}
SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  SDValue Offset, AMOpc;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (LoadedVT == MVT::i32 &&
      SelectAddrMode2Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = isPre ? ARM::LDR_PRE : ARM::LDR_POST;
    Match = true;
  } else if (LoadedVT == MVT::i16 &&
             SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Match = true;
    Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
      ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
      : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
    if (LD->getExtensionType() == ISD::SEXTLOAD) {
      if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
      }
    } else {
      if (SelectAddrMode2Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRB_PRE : ARM::LDRB_POST;
      }
    }
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
                                  MVT::Other, Ops, 6);
  }

  return NULL;
}
SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
  SDValue Offset;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
    switch (LoadedVT.getSimpleVT().SimpleTy) {
    case MVT::i32:
      Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
      break;
    case MVT::i16:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
      else
        Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
      break;
    case MVT::i8:
    case MVT::i1:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
      else
        Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
      break;
    default:
      return NULL;
    }
    Match = true;
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
                                  MVT::Other, Ops, 5);
  }

  return NULL;
}
/// PairSRegs - Form a D register from a pair of S registers.
///
SDNode *ARMDAGToDAGISel::PairSRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
}

/// PairDRegs - Form a quad register from a pair of D registers.
///
SDNode *ARMDAGToDAGISel::PairDRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
}

/// PairQRegs - Form 4 consecutive D registers from a pair of Q registers.
///
SDNode *ARMDAGToDAGISel::PairQRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
}

/// QuadSRegs - Form 4 consecutive S registers.
///
SDNode *ARMDAGToDAGISel::QuadSRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                                    V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
}

/// QuadDRegs - Form 4 consecutive D registers.
///
SDNode *ARMDAGToDAGISel::QuadDRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                                    V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
}

/// QuadQRegs - Form 4 consecutive Q registers.
///
SDNode *ARMDAGToDAGISel::QuadQRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                                    V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
}
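
// For illustration: each REG_SEQUENCE above takes an operand list of the form
// { RegClassID, V0, SubRegIdx0, V1, SubRegIdx1, ... }, so pairing d0/d1 into
// a Q register passes QPRRegClassID with dsub_0 and dsub_1.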
/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction.  The supported values depend on the
/// number of registers being loaded.
SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
                                       bool is64BitVector) {
  unsigned NumRegs = NumVecs;
  if (!is64BitVector && NumVecs < 3)
    NumRegs *= 2;

  unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
  if (Alignment >= 32 && NumRegs == 4)
    Alignment = 32;
  else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
    Alignment = 16;
  else if (Alignment >= 8)
    Alignment = 8;
  else
    Alignment = 0;

  return CurDAG->getTargetConstant(Alignment, MVT::i32);
}
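
// Worked example (illustrative): a vld2 of two Q registers covers 4 D
// registers (NumVecs = 2, 128-bit vectors, so NumRegs = 4); an IR alignment
// of 64 bytes is therefore clamped to 32, the largest encodable hint for a
// 4-register access.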
SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   unsigned *DOpcodes, unsigned *QOpcodes0,
                                   unsigned *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
    break;
  }

  EVT ResTy;
  if (NumVecs == 1)
    ResTy = VT;
  else {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
  }
  std::vector<EVT> ResTys;
  ResTys.push_back(ResTy);
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SDNode *VLd;
  SmallVector<SDValue, 7> Ops;

  // Double registers and VLD1/VLD2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                    QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());

  } else {
    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.
    EVT AddrTy = MemAddr.getValueType();

    // Load the even subregs.  This is always an updating load, so that it
    // provides the address to the second load for the odd subregs.
    SDValue ImplDef =
      SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                          ResTy, AddrTy, MVT::Other, OpsA, 7);
    Chain = SDValue(VLdA, 2);

    // Load the odd subregs.
    Ops.push_back(SDValue(VLdA, 1));
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      assert(isa<ConstantSDNode>(Inc.getNode()) &&
             "only constant post-increment update allowed for VLD3/4");
      (void)Inc;
      Ops.push_back(Reg0);
    }
    Ops.push_back(SDValue(VLdA, 0));
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
                                 Ops.data(), Ops.size());
  }

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);

  if (NumVecs == 1)
    return VLd;

  // Extract out the subregisters.
  SDValue SuperReg = SDValue(VLd, 0);
  assert(ARM::dsub_7 == ARM::dsub_0+7 &&
         ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
  unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
  return NULL;
}
1579 SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
1580 unsigned *DOpcodes, unsigned *QOpcodes0,
1581 unsigned *QOpcodes1) {
1582 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
1583 DebugLoc dl = N->getDebugLoc();
1585 SDValue MemAddr, Align;
1586 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1587 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
1588 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1589 return NULL;
1591 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1592 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1594 SDValue Chain = N->getOperand(0);
1595 EVT VT = N->getOperand(Vec0Idx).getValueType();
1596 bool is64BitVector = VT.is64BitVector();
1597 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
1599 unsigned OpcodeIndex;
1600 switch (VT.getSimpleVT().SimpleTy) {
1601 default: llvm_unreachable("unhandled vst type");
1602 // Double-register operations:
1603 case MVT::v8i8: OpcodeIndex = 0; break;
1604 case MVT::v4i16: OpcodeIndex = 1; break;
1605 case MVT::v2f32:
1606 case MVT::v2i32: OpcodeIndex = 2; break;
1607 case MVT::v1i64: OpcodeIndex = 3; break;
1608 // Quad-register operations:
1609 case MVT::v16i8: OpcodeIndex = 0; break;
1610 case MVT::v8i16: OpcodeIndex = 1; break;
1611 case MVT::v4f32:
1612 case MVT::v4i32: OpcodeIndex = 2; break;
1613 case MVT::v2i64: OpcodeIndex = 3;
1614 assert(NumVecs == 1 && "v2i64 type only supported for VST1");
1615 break;
1618 std::vector<EVT> ResTys;
1619 if (isUpdating)
1620 ResTys.push_back(MVT::i32);
1621 ResTys.push_back(MVT::Other);
1623 SDValue Pred = getAL(CurDAG);
1624 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1625 SmallVector<SDValue, 7> Ops;
1627 // Double registers and VST1/VST2 quad registers are directly supported.
1628 if (is64BitVector || NumVecs <= 2) {
1629 SDValue SrcReg;
1630 if (NumVecs == 1) {
1631 SrcReg = N->getOperand(Vec0Idx);
1632 } else if (is64BitVector) {
1633 // Form a REG_SEQUENCE to force register allocation.
1634 SDValue V0 = N->getOperand(Vec0Idx + 0);
1635 SDValue V1 = N->getOperand(Vec0Idx + 1);
1636 if (NumVecs == 2)
1637 SrcReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
1638 else {
1639 SDValue V2 = N->getOperand(Vec0Idx + 2);
1640 // If it's a vst3, form a quad D-register and leave the last part as
1641 // an undef.
1642 SDValue V3 = (NumVecs == 3)
1643 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
1644 : N->getOperand(Vec0Idx + 3);
1645 SrcReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
1647 } else {
1648 // Form a QQ register.
1649 SDValue Q0 = N->getOperand(Vec0Idx);
1650 SDValue Q1 = N->getOperand(Vec0Idx + 1);
1651 SrcReg = SDValue(PairQRegs(MVT::v4i64, Q0, Q1), 0);
1654 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1655 QOpcodes0[OpcodeIndex]);
1656 Ops.push_back(MemAddr);
1657 Ops.push_back(Align);
1658 if (isUpdating) {
1659 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1660 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1662 Ops.push_back(SrcReg);
1663 Ops.push_back(Pred);
1664 Ops.push_back(Reg0);
1665 Ops.push_back(Chain);
1666 SDNode *VSt =
1667 CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
1669 // Transfer memoperands.
1670 cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
1672 return VSt;
1675 // Otherwise, quad registers are stored with two separate instructions,
1676 // where one stores the even registers and the other stores the odd registers.
1678 // Form the QQQQ REG_SEQUENCE.
1679 SDValue V0 = N->getOperand(Vec0Idx + 0);
1680 SDValue V1 = N->getOperand(Vec0Idx + 1);
1681 SDValue V2 = N->getOperand(Vec0Idx + 2);
1682 SDValue V3 = (NumVecs == 3)
1683 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
1684 : N->getOperand(Vec0Idx + 3);
1685 SDValue RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
1687 // Store the even D registers. This is always an updating store, so that it
1688 // provides the address to the second store for the odd subregs.
1689 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
1690 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
1691 MemAddr.getValueType(),
1692 MVT::Other, OpsA, 7);
1693 cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
1694 Chain = SDValue(VStA, 1);
1696 // Store the odd D registers.
1697 Ops.push_back(SDValue(VStA, 0));
1698 Ops.push_back(Align);
1699 if (isUpdating) {
1700 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1701 assert(isa<ConstantSDNode>(Inc.getNode()) &&
1702 "only constant post-increment update allowed for VST3/4");
1703 (void)Inc;
1704 Ops.push_back(Reg0);
1705 }
1706 Ops.push_back(RegSeq);
1707 Ops.push_back(Pred);
1708 Ops.push_back(Reg0);
1709 Ops.push_back(Chain);
1710 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
1711 Ops.data(), Ops.size());
1712 cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
1713 return VStB;
1714 }
1716 SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
1717 bool isUpdating, unsigned NumVecs,
1718 unsigned *DOpcodes,
1719 unsigned *QOpcodes) {
1720 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
1721 DebugLoc dl = N->getDebugLoc();
1723 SDValue MemAddr, Align;
1724 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1725 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
1726 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1727 return NULL;
1729 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1730 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1732 SDValue Chain = N->getOperand(0);
1733 unsigned Lane =
1734 cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
1735 EVT VT = N->getOperand(Vec0Idx).getValueType();
1736 bool is64BitVector = VT.is64BitVector();
1738 unsigned Alignment = 0;
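// Clamp the alignment operand to what the access can actually use: e.g. a
// two-vector v4i16 lane op touches NumBytes = 2 * 16/8 = 4 bytes, so an
// alignment of 8 is clamped down to 4, and anything smaller than both 8
// and NumBytes is dropped to 0 (no alignment encoded).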
1739 if (NumVecs != 3) {
1740 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1741 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
1742 if (Alignment > NumBytes)
1743 Alignment = NumBytes;
1744 if (Alignment < 8 && Alignment < NumBytes)
1745 Alignment = 0;
1746 // Alignment must be a power of two; make sure of that.
1747 Alignment = (Alignment & -Alignment);
1748 if (Alignment == 1)
1749 Alignment = 0;
1750 }
1751 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
1753 unsigned OpcodeIndex;
1754 switch (VT.getSimpleVT().SimpleTy) {
1755 default: llvm_unreachable("unhandled vld/vst lane type");
1756 // Double-register operations:
1757 case MVT::v8i8: OpcodeIndex = 0; break;
1758 case MVT::v4i16: OpcodeIndex = 1; break;
1759 case MVT::v2f32:
1760 case MVT::v2i32: OpcodeIndex = 2; break;
1761 // Quad-register operations:
1762 case MVT::v8i16: OpcodeIndex = 0; break;
1763 case MVT::v4f32:
1764 case MVT::v4i32: OpcodeIndex = 1; break;
1765 }
1767 std::vector<EVT> ResTys;
1768 if (IsLoad) {
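// The loaded super-register is modeled as a single wide vector of i64s:
// e.g. a two-vector lane load of Q registers has ResTyElts = 2 * 2 = 4,
// i.e. a v4i64 QQ register.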
1769 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
1770 if (!is64BitVector)
1771 ResTyElts *= 2;
1772 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
1773 MVT::i64, ResTyElts));
1774 }
1775 if (isUpdating)
1776 ResTys.push_back(MVT::i32);
1777 ResTys.push_back(MVT::Other);
1779 SDValue Pred = getAL(CurDAG);
1780 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1782 SmallVector<SDValue, 8> Ops;
1783 Ops.push_back(MemAddr);
1784 Ops.push_back(Align);
1785 if (isUpdating) {
1786 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1787 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1788 }
1790 SDValue SuperReg;
1791 SDValue V0 = N->getOperand(Vec0Idx + 0);
1792 SDValue V1 = N->getOperand(Vec0Idx + 1);
1793 if (NumVecs == 2) {
1794 if (is64BitVector)
1795 SuperReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
1796 else
1797 SuperReg = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
1798 } else {
1799 SDValue V2 = N->getOperand(Vec0Idx + 2);
1800 SDValue V3 = (NumVecs == 3)
1801 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
1802 : N->getOperand(Vec0Idx + 3);
1803 if (is64BitVector)
1804 SuperReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
1805 else
1806 SuperReg = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
1807 }
1808 Ops.push_back(SuperReg);
1809 Ops.push_back(getI32Imm(Lane));
1810 Ops.push_back(Pred);
1811 Ops.push_back(Reg0);
1812 Ops.push_back(Chain);
1814 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1815 QOpcodes[OpcodeIndex]);
1816 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys,
1817 Ops.data(), Ops.size());
1818 cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
1819 if (!IsLoad)
1820 return VLdLn;
1822 // Extract the subregisters.
1823 SuperReg = SDValue(VLdLn, 0);
1824 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
1825 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
1826 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
1827 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
1828 ReplaceUses(SDValue(N, Vec),
1829 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
1830 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
1831 if (isUpdating)
1832 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
1833 return NULL;
1834 }
1836 SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
1837 unsigned NumVecs, unsigned *Opcodes) {
1838 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
1839 DebugLoc dl = N->getDebugLoc();
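// A vldN-dup loads one element per register and replicates it to every
// lane, e.g. vld2.8 {d0[], d1[]}, [r0] for the NumVecs == 2, v8i8 case.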
1841 SDValue MemAddr, Align;
1842 if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
1843 return NULL;
1845 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1846 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1848 SDValue Chain = N->getOperand(0);
1849 EVT VT = N->getValueType(0);
1851 unsigned Alignment = 0;
1852 if (NumVecs != 3) {
1853 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1854 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
1855 if (Alignment > NumBytes)
1856 Alignment = NumBytes;
1857 if (Alignment < 8 && Alignment < NumBytes)
1858 Alignment = 0;
1859 // Alignment must be a power of two; make sure of that.
1860 Alignment = (Alignment & -Alignment);
1861 if (Alignment == 1)
1862 Alignment = 0;
1863 }
1864 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
1866 unsigned OpcodeIndex;
1867 switch (VT.getSimpleVT().SimpleTy) {
1868 default: llvm_unreachable("unhandled vld-dup type");
1869 case MVT::v8i8: OpcodeIndex = 0; break;
1870 case MVT::v4i16: OpcodeIndex = 1; break;
1871 case MVT::v2f32:
1872 case MVT::v2i32: OpcodeIndex = 2; break;
1873 }
1875 SDValue Pred = getAL(CurDAG);
1876 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1877 SDValue SuperReg;
1878 unsigned Opc = Opcodes[OpcodeIndex];
1879 SmallVector<SDValue, 6> Ops;
1880 Ops.push_back(MemAddr);
1881 Ops.push_back(Align);
1882 if (isUpdating) {
1883 SDValue Inc = N->getOperand(2);
1884 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1885 }
1886 Ops.push_back(Pred);
1887 Ops.push_back(Reg0);
1888 Ops.push_back(Chain);
1890 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
1891 std::vector<EVT> ResTys;
1892 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts));
1893 if (isUpdating)
1894 ResTys.push_back(MVT::i32);
1895 ResTys.push_back(MVT::Other);
1896 SDNode *VLdDup =
1897 CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
1898 cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
1899 SuperReg = SDValue(VLdDup, 0);
1901 // Extract the subregisters.
1902 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
1903 unsigned SubIdx = ARM::dsub_0;
1904 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
1905 ReplaceUses(SDValue(N, Vec),
1906 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
1907 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
1908 if (isUpdating)
1909 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
1910 return NULL;
1911 }
1913 SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
1914 unsigned Opc) {
1915 assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
1916 DebugLoc dl = N->getDebugLoc();
1917 EVT VT = N->getValueType(0);
1918 unsigned FirstTblReg = IsExt ? 2 : 1;
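// For the vtbx (IsExt) forms, operand 1 is the vector whose bytes are kept
// for out-of-range indices, so the table registers start one operand later
// than for vtbl.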
1920 // Form a REG_SEQUENCE to force register allocation.
1921 SDValue RegSeq;
1922 SDValue V0 = N->getOperand(FirstTblReg + 0);
1923 SDValue V1 = N->getOperand(FirstTblReg + 1);
1924 if (NumVecs == 2)
1925 RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);
1926 else {
1927 SDValue V2 = N->getOperand(FirstTblReg + 2);
1928 // If it's a vtbl3, form a quad D-register and leave the last part as
1929 // an undef.
1930 SDValue V3 = (NumVecs == 3)
1931 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
1932 : N->getOperand(FirstTblReg + 3);
1933 RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
1934 }
1936 SmallVector<SDValue, 6> Ops;
1937 if (IsExt)
1938 Ops.push_back(N->getOperand(1));
1939 Ops.push_back(RegSeq);
1940 Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
1941 Ops.push_back(getAL(CurDAG)); // predicate
1942 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
1943 return CurDAG->getMachineNode(Opc, dl, VT, Ops.data(), Ops.size());
1944 }
1946 SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
1947 bool isSigned) {
1948 if (!Subtarget->hasV6T2Ops())
1949 return NULL;
1951 unsigned Opc = isSigned ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
1952 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
1955 // For unsigned extracts, check for a shift right and mask
1956 unsigned And_imm = 0;
1957 if (N->getOpcode() == ISD::AND) {
1958 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
1960 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
1961 if (And_imm & (And_imm + 1))
1962 return NULL;
1964 unsigned Srl_imm = 0;
1965 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
1966 Srl_imm)) {
1967 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
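// E.g. for (x >> 8) & 0xff: And_imm == 0xff and Srl_imm == 8, so the node
// is selected as a UBFX with LSB == 8 and Width == 8.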
1969 unsigned Width = CountTrailingOnes_32(And_imm);
1970 unsigned LSB = Srl_imm;
1971 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1972 SDValue Ops[] = { N->getOperand(0).getOperand(0),
1973 CurDAG->getTargetConstant(LSB, MVT::i32),
1974 CurDAG->getTargetConstant(Width, MVT::i32),
1975 getAL(CurDAG), Reg0 };
1976 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
1977 }
1978 }
1979 return NULL;
1980 }
1982 // Otherwise, we're looking for a shift of a shift
1983 unsigned Shl_imm = 0;
1984 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
1985 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
1986 unsigned Srl_imm = 0;
1987 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
1988 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
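// E.g. for (x << 4) >> 24 (arithmetic shift): Width == 32 - 24 == 8 and
// LSB == 24 - 4 == 20, i.e. a signed extract of bits [27:20] of x.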
1989 unsigned Width = 32 - Srl_imm;
1990 int LSB = Srl_imm - Shl_imm;
1991 if (LSB < 0)
1992 return NULL;
1993 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1994 SDValue Ops[] = { N->getOperand(0).getOperand(0),
1995 CurDAG->getTargetConstant(LSB, MVT::i32),
1996 CurDAG->getTargetConstant(Width, MVT::i32),
1997 getAL(CurDAG), Reg0 };
1998 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
1999 }
2000 }
2001 return NULL;
2002 }
2004 SDNode *ARMDAGToDAGISel::
2005 SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2006 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2007 SDValue CPTmp0;
2008 SDValue CPTmp1;
2009 if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
2010 unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue();
2011 unsigned SOShOp = ARM_AM::getSORegShOp(SOVal);
2012 unsigned Opc = 0;
2013 switch (SOShOp) {
2014 case ARM_AM::lsl: Opc = ARM::t2MOVCClsl; break;
2015 case ARM_AM::lsr: Opc = ARM::t2MOVCClsr; break;
2016 case ARM_AM::asr: Opc = ARM::t2MOVCCasr; break;
2017 case ARM_AM::ror: Opc = ARM::t2MOVCCror; break;
2018 default:
2019 llvm_unreachable("Unknown so_reg opcode!");
2020 break;
2021 }
2022 SDValue SOShImm =
2023 CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
2024 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2025 SDValue Ops[] = { FalseVal, CPTmp0, SOShImm, CC, CCR, InFlag };
2026 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
2027 }
2028 return 0;
2029 }
2031 SDNode *ARMDAGToDAGISel::
2032 SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2033 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2034 SDValue CPTmp0;
2035 SDValue CPTmp1;
2036 SDValue CPTmp2;
2037 if (SelectShifterOperandReg(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
2038 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2039 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
2040 return CurDAG->SelectNodeTo(N, ARM::MOVCCs, MVT::i32, Ops, 7);
2041 }
2042 return 0;
2043 }
2045 SDNode *ARMDAGToDAGISel::
2046 SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2047 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2048 ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
2049 if (!T)
2050 return 0;
2052 unsigned Opc = 0;
2053 unsigned TrueImm = T->getZExtValue();
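// Pick the cheapest encoding the immediate admits: e.g. 0x00ff00ff is a
// Thumb2 so_imm (replicated bytes) and uses t2MOVCCi, 0x1234 fits in 16
// bits and uses t2MOVCCi16, and a one-use arbitrary constant such as
// 0x12345678 falls back to the t2MOVCCi32imm pseudo.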
2054 if (is_t2_so_imm(TrueImm)) {
2055 Opc = ARM::t2MOVCCi;
2056 } else if (TrueImm <= 0xffff) {
2057 Opc = ARM::t2MOVCCi16;
2058 } else if (is_t2_so_imm_not(TrueImm)) {
2059 TrueImm = ~TrueImm;
2060 Opc = ARM::t2MVNCCi;
2061 } else if (TrueVal.getNode()->hasOneUse() && Subtarget->hasV6T2Ops()) {
2062 // Large immediate.
2063 Opc = ARM::t2MOVCCi32imm;
2064 }
2066 if (Opc) {
2067 SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
2068 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2069 SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
2070 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2071 }
2073 return 0;
2074 }
2076 SDNode *ARMDAGToDAGISel::
2077 SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2078 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2079 ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
2080 if (!T)
2081 return 0;
2083 unsigned Opc = 0;
2084 unsigned TrueImm = T->getZExtValue();
2085 bool isSoImm = is_so_imm(TrueImm);
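// An ARM so_imm is an 8-bit value rotated right by an even amount, e.g.
// 0xff000000 (0xff ror 8) or 0x3fc (0xff ror 30).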
2086 if (isSoImm) {
2087 Opc = ARM::MOVCCi;
2088 } else if (Subtarget->hasV6T2Ops() && TrueImm <= 0xffff) {
2089 Opc = ARM::MOVCCi16;
2090 } else if (is_so_imm_not(TrueImm)) {
2091 TrueImm = ~TrueImm;
2092 Opc = ARM::MVNCCi;
2093 } else if (TrueVal.getNode()->hasOneUse() &&
2094 (Subtarget->hasV6T2Ops() || ARM_AM::isSOImmTwoPartVal(TrueImm))) {
2095 // Large immediate.
2096 Opc = ARM::MOVCCi32imm;
2097 }
2099 if (Opc) {
2100 SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
2101 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2102 SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
2103 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2104 }
2106 return 0;
2107 }
2109 SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
2110 EVT VT = N->getValueType(0);
2111 SDValue FalseVal = N->getOperand(0);
2112 SDValue TrueVal = N->getOperand(1);
2113 SDValue CC = N->getOperand(2);
2114 SDValue CCR = N->getOperand(3);
2115 SDValue InFlag = N->getOperand(4);
2116 assert(CC.getOpcode() == ISD::Constant);
2117 assert(CCR.getOpcode() == ISD::Register);
2118 ARMCC::CondCodes CCVal =
2119 (ARMCC::CondCodes)cast<ConstantSDNode>(CC)->getZExtValue();
2121 if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
2122 // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
2123 // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
2124 // Pattern complexity = 18 cost = 1 size = 0
2125 SDValue CPTmp0;
2126 SDValue CPTmp1;
2127 SDValue CPTmp2;
2128 if (Subtarget->isThumb()) {
2129 SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal,
2130 CCVal, CCR, InFlag);
2131 if (!Res)
2132 Res = SelectT2CMOVShiftOp(N, TrueVal, FalseVal,
2133 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2134 if (Res)
2135 return Res;
2136 } else {
2137 SDNode *Res = SelectARMCMOVShiftOp(N, FalseVal, TrueVal,
2138 CCVal, CCR, InFlag);
2139 if (!Res)
2140 Res = SelectARMCMOVShiftOp(N, TrueVal, FalseVal,
2141 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2142 if (Res)
2143 return Res;
2144 }
2146 // Pattern: (ARMcmov:i32 GPR:i32:$false,
2147 // (imm:i32)<<P:Pred_so_imm>>:$true,
2148 // (imm:i32):$cc)
2149 // Emits: (MOVCCi:i32 GPR:i32:$false,
2150 // (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
2151 // Pattern complexity = 10 cost = 1 size = 0
2152 if (Subtarget->isThumb()) {
2153 SDNode *Res = SelectT2CMOVImmOp(N, FalseVal, TrueVal,
2154 CCVal, CCR, InFlag);
2155 if (!Res)
2156 Res = SelectT2CMOVImmOp(N, TrueVal, FalseVal,
2157 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2158 if (Res)
2159 return Res;
2160 } else {
2161 SDNode *Res = SelectARMCMOVImmOp(N, FalseVal, TrueVal,
2162 CCVal, CCR, InFlag);
2163 if (!Res)
2164 Res = SelectARMCMOVImmOp(N, TrueVal, FalseVal,
2165 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2166 if (Res)
2167 return Res;
2168 }
2169 }
2171 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2172 // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2173 // Pattern complexity = 6 cost = 1 size = 0
2175 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2176 // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2177 // Pattern complexity = 6 cost = 11 size = 0
2179 // Also VMOVScc and VMOVDcc.
2180 SDValue Tmp2 = CurDAG->getTargetConstant(CCVal, MVT::i32);
2181 SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag };
2182 unsigned Opc = 0;
2183 switch (VT.getSimpleVT().SimpleTy) {
2184 default: assert(false && "Illegal conditional move type!");
2185 break;
2186 case MVT::i32:
2187 Opc = Subtarget->isThumb()
2188 ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr_pseudo)
2189 : ARM::MOVCCr;
2190 break;
2191 case MVT::f32:
2192 Opc = ARM::VMOVScc;
2193 break;
2194 case MVT::f64:
2195 Opc = ARM::VMOVDcc;
2196 break;
2197 }
2198 return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
2199 }
2201 SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
2202 // The only time a CONCAT_VECTORS operation can have legal types is when
2203 // two 64-bit vectors are concatenated to a 128-bit vector.
2204 EVT VT = N->getValueType(0);
2205 if (!VT.is128BitVector() || N->getNumOperands() != 2)
2206 llvm_unreachable("unexpected CONCAT_VECTORS");
2207 return PairDRegs(VT, N->getOperand(0), N->getOperand(1));
2208 }
2210 SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
2211 DebugLoc dl = N->getDebugLoc();
2213 if (N->isMachineOpcode())
2214 return NULL; // Already selected.
2216 switch (N->getOpcode()) {
2217 default: break;
2218 case ISD::Constant: {
2219 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
2220 bool UseCP = true;
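// Use the constant pool only when the immediate cannot be materialized in
// a couple of instructions: e.g. without MOVT, 0x12345678 is not an
// so_imm, nor is its complement, nor is it a two-so_imm sum, so it gets
// loaded from the pool; 0xff00 (0xff ror 24) is a single MOV.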
2221 if (Subtarget->hasThumb2())
2222 // Thumb2-aware targets have the MOVT instruction, so all immediates can
2223 // be done with MOV + MOVT, at worst.
2224 UseCP = false;
2225 else {
2226 if (Subtarget->isThumb()) {
2227 UseCP = (Val > 255 && // MOV
2228 ~Val > 255 && // MOV + MVN
2229 !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL
2230 } else
2231 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
2232 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
2233 !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs.
2234 }
2236 if (UseCP) {
2237 SDValue CPIdx =
2238 CurDAG->getTargetConstantPool(ConstantInt::get(
2239 Type::getInt32Ty(*CurDAG->getContext()), Val),
2240 TLI.getPointerTy());
2242 SDNode *ResNode;
2243 if (Subtarget->isThumb1Only()) {
2244 SDValue Pred = getAL(CurDAG);
2245 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2246 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
2247 ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
2248 Ops, 4);
2249 } else {
2250 SDValue Ops[] = {
2251 CPIdx,
2252 CurDAG->getTargetConstant(0, MVT::i32),
2253 getAL(CurDAG),
2254 CurDAG->getRegister(0, MVT::i32),
2255 CurDAG->getEntryNode()
2256 };
2257 ResNode=CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
2258 Ops, 5);
2259 }
2260 ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
2261 return NULL;
2262 }
2264 // Other cases are autogenerated.
2265 break;
2266 }
2267 case ISD::FrameIndex: {
2268 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
2269 int FI = cast<FrameIndexSDNode>(N)->getIndex();
2270 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
2271 if (Subtarget->isThumb1Only()) {
2272 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
2273 CurDAG->getTargetConstant(0, MVT::i32));
2274 } else {
2275 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
2276 ARM::t2ADDri : ARM::ADDri);
2277 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2278 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2279 CurDAG->getRegister(0, MVT::i32) };
2280 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2281 }
2282 }
2283 case ISD::SRL:
2284 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2285 return I;
2286 break;
2287 case ISD::SRA:
2288 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
2289 return I;
2290 break;
2291 case ISD::MUL:
2292 if (Subtarget->isThumb1Only())
2293 break;
2294 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
2295 unsigned RHSV = C->getZExtValue();
2296 if (!RHSV) break;
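// Strength-reduce multiply by 2^n +/- 1 to a shifter-operand add/subtract:
// e.g. x * 9 == x + (x << 3) selects ADD with an LSL #3 operand, and
// x * 7 == (x << 3) - x selects RSB with an LSL #3 operand.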
2297 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
2298 unsigned ShImm = Log2_32(RHSV-1);
2299 if (ShImm >= 32)
2300 break;
2301 SDValue V = N->getOperand(0);
2302 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2303 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2304 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2305 if (Subtarget->isThumb()) {
2306 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2307 return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
2308 } else {
2309 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2310 return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7);
2311 }
2312 }
2313 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
2314 unsigned ShImm = Log2_32(RHSV+1);
2315 if (ShImm >= 32)
2316 break;
2317 SDValue V = N->getOperand(0);
2318 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2319 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2320 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2321 if (Subtarget->isThumb()) {
2322 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2323 return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
2324 } else {
2325 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2326 return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7);
2327 }
2328 }
2329 }
2330 break;
2331 case ISD::AND: {
2332 // Check for unsigned bitfield extract
2333 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2334 return I;
2336 // (and (or x, c2), c1): the top 16 bits of c1 and c2 match, the lower 16
2337 // bits of c1 are 0xffff, and the lower 16 bits of c2 are 0. That is, the
2338 // top 16 bits are entirely contributed by c2 and the lower 16 bits are
2339 // entirely contributed by x. That's equal to (or (and x, 0xffff), (and c2, 0xffff0000)).
2340 // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
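// E.g. c1 == 0x1234ffff and c2 == 0x12340000: (x | c2) & c1 keeps the low
// 16 bits of x and forces the top 16 bits to 0x1234, i.e. MOVT x, #0x1234.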
2341 EVT VT = N->getValueType(0);
2342 if (VT != MVT::i32)
2343 break;
2344 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
2345 ? ARM::t2MOVTi16
2346 : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
2347 if (!Opc)
2348 break;
2349 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2350 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2351 if (!N1C)
2352 break;
2353 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
2354 SDValue N2 = N0.getOperand(1);
2355 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
2356 if (!N2C)
2357 break;
2358 unsigned N1CVal = N1C->getZExtValue();
2359 unsigned N2CVal = N2C->getZExtValue();
2360 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
2361 (N1CVal & 0xffffU) == 0xffffU &&
2362 (N2CVal & 0xffffU) == 0x0U) {
2363 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
2364 MVT::i32);
2365 SDValue Ops[] = { N0.getOperand(0), Imm16,
2366 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2367 return CurDAG->getMachineNode(Opc, dl, VT, Ops, 4);
2368 }
2369 }
2370 break;
2371 }
2372 case ARMISD::VMOVRRD:
2373 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
2374 N->getOperand(0), getAL(CurDAG),
2375 CurDAG->getRegister(0, MVT::i32));
2376 case ISD::UMUL_LOHI: {
2377 if (Subtarget->isThumb1Only())
2378 break;
2379 if (Subtarget->isThumb()) {
2380 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2381 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2382 CurDAG->getRegister(0, MVT::i32) };
2383 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops, 4);
2384 } else {
2385 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2386 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2387 CurDAG->getRegister(0, MVT::i32) };
2388 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2389 ARM::UMULL : ARM::UMULLv5,
2390 dl, MVT::i32, MVT::i32, Ops, 5);
2391 }
2392 }
2393 case ISD::SMUL_LOHI: {
2394 if (Subtarget->isThumb1Only())
2395 break;
2396 if (Subtarget->isThumb()) {
2397 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2398 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2399 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops, 4);
2400 } else {
2401 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2402 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2403 CurDAG->getRegister(0, MVT::i32) };
2404 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2405 ARM::SMULL : ARM::SMULLv5,
2406 dl, MVT::i32, MVT::i32, Ops, 5);
2407 }
2408 }
2409 case ISD::LOAD: {
2410 SDNode *ResNode = 0;
2411 if (Subtarget->isThumb() && Subtarget->hasThumb2())
2412 ResNode = SelectT2IndexedLoad(N);
2413 else
2414 ResNode = SelectARMIndexedLoad(N);
2415 if (ResNode)
2416 return ResNode;
2417 // Other cases are autogenerated.
2418 break;
2419 }
2420 case ARMISD::BRCOND: {
2421 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2422 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2423 // Pattern complexity = 6 cost = 1 size = 0
2425 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2426 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
2427 // Pattern complexity = 6 cost = 1 size = 0
2429 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2430 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2431 // Pattern complexity = 6 cost = 1 size = 0
2433 unsigned Opc = Subtarget->isThumb() ?
2434 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
2435 SDValue Chain = N->getOperand(0);
2436 SDValue N1 = N->getOperand(1);
2437 SDValue N2 = N->getOperand(2);
2438 SDValue N3 = N->getOperand(3);
2439 SDValue InFlag = N->getOperand(4);
2440 assert(N1.getOpcode() == ISD::BasicBlock);
2441 assert(N2.getOpcode() == ISD::Constant);
2442 assert(N3.getOpcode() == ISD::Register);
2444 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
2445 cast<ConstantSDNode>(N2)->getZExtValue()),
2446 MVT::i32);
2447 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
2448 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
2449 MVT::Glue, Ops, 5);
2450 Chain = SDValue(ResNode, 0);
2451 if (N->getNumValues() == 2) {
2452 InFlag = SDValue(ResNode, 1);
2453 ReplaceUses(SDValue(N, 1), InFlag);
2454 }
2455 ReplaceUses(SDValue(N, 0),
2456 SDValue(Chain.getNode(), Chain.getResNo()));
2457 return NULL;
2458 }
2459 case ARMISD::CMOV:
2460 return SelectCMOVOp(N);
2461 case ARMISD::VZIP: {
2462 unsigned Opc = 0;
2463 EVT VT = N->getValueType(0);
2464 switch (VT.getSimpleVT().SimpleTy) {
2465 default: return NULL;
2466 case MVT::v8i8: Opc = ARM::VZIPd8; break;
2467 case MVT::v4i16: Opc = ARM::VZIPd16; break;
2468 case MVT::v2f32:
2469 case MVT::v2i32: Opc = ARM::VZIPd32; break;
2470 case MVT::v16i8: Opc = ARM::VZIPq8; break;
2471 case MVT::v8i16: Opc = ARM::VZIPq16; break;
2472 case MVT::v4f32:
2473 case MVT::v4i32: Opc = ARM::VZIPq32; break;
2474 }
2475 SDValue Pred = getAL(CurDAG);
2476 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2477 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2478 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
2479 }
2480 case ARMISD::VUZP: {
2481 unsigned Opc = 0;
2482 EVT VT = N->getValueType(0);
2483 switch (VT.getSimpleVT().SimpleTy) {
2484 default: return NULL;
2485 case MVT::v8i8: Opc = ARM::VUZPd8; break;
2486 case MVT::v4i16: Opc = ARM::VUZPd16; break;
2487 case MVT::v2f32:
2488 case MVT::v2i32: Opc = ARM::VUZPd32; break;
2489 case MVT::v16i8: Opc = ARM::VUZPq8; break;
2490 case MVT::v8i16: Opc = ARM::VUZPq16; break;
2491 case MVT::v4f32:
2492 case MVT::v4i32: Opc = ARM::VUZPq32; break;
2493 }
2494 SDValue Pred = getAL(CurDAG);
2495 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2496 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2497 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
2498 }
2499 case ARMISD::VTRN: {
2500 unsigned Opc = 0;
2501 EVT VT = N->getValueType(0);
2502 switch (VT.getSimpleVT().SimpleTy) {
2503 default: return NULL;
2504 case MVT::v8i8: Opc = ARM::VTRNd8; break;
2505 case MVT::v4i16: Opc = ARM::VTRNd16; break;
2506 case MVT::v2f32:
2507 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2508 case MVT::v16i8: Opc = ARM::VTRNq8; break;
2509 case MVT::v8i16: Opc = ARM::VTRNq16; break;
2510 case MVT::v4f32:
2511 case MVT::v4i32: Opc = ARM::VTRNq32; break;
2512 }
2513 SDValue Pred = getAL(CurDAG);
2514 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2515 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2516 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
2517 }
2518 case ARMISD::BUILD_VECTOR: {
2519 EVT VecVT = N->getValueType(0);
2520 EVT EltVT = VecVT.getVectorElementType();
2521 unsigned NumElts = VecVT.getVectorNumElements();
2522 if (EltVT == MVT::f64) {
2523 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
2524 return PairDRegs(VecVT, N->getOperand(0), N->getOperand(1));
2525 }
2526 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
2527 if (NumElts == 2)
2528 return PairSRegs(VecVT, N->getOperand(0), N->getOperand(1));
2529 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
2530 return QuadSRegs(VecVT, N->getOperand(0), N->getOperand(1),
2531 N->getOperand(2), N->getOperand(3));
2532 }
2534 case ARMISD::VLD2DUP: {
2535 unsigned Opcodes[] = { ARM::VLD2DUPd8Pseudo, ARM::VLD2DUPd16Pseudo,
2536 ARM::VLD2DUPd32Pseudo };
2537 return SelectVLDDup(N, false, 2, Opcodes);
2538 }
2540 case ARMISD::VLD3DUP: {
2541 unsigned Opcodes[] = { ARM::VLD3DUPd8Pseudo, ARM::VLD3DUPd16Pseudo,
2542 ARM::VLD3DUPd32Pseudo };
2543 return SelectVLDDup(N, false, 3, Opcodes);
2544 }
2546 case ARMISD::VLD4DUP: {
2547 unsigned Opcodes[] = { ARM::VLD4DUPd8Pseudo, ARM::VLD4DUPd16Pseudo,
2548 ARM::VLD4DUPd32Pseudo };
2549 return SelectVLDDup(N, false, 4, Opcodes);
2550 }
2552 case ARMISD::VLD2DUP_UPD: {
2553 unsigned Opcodes[] = { ARM::VLD2DUPd8Pseudo_UPD, ARM::VLD2DUPd16Pseudo_UPD,
2554 ARM::VLD2DUPd32Pseudo_UPD };
2555 return SelectVLDDup(N, true, 2, Opcodes);
2556 }
2558 case ARMISD::VLD3DUP_UPD: {
2559 unsigned Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD, ARM::VLD3DUPd16Pseudo_UPD,
2560 ARM::VLD3DUPd32Pseudo_UPD };
2561 return SelectVLDDup(N, true, 3, Opcodes);
2562 }
2564 case ARMISD::VLD4DUP_UPD: {
2565 unsigned Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD, ARM::VLD4DUPd16Pseudo_UPD,
2566 ARM::VLD4DUPd32Pseudo_UPD };
2567 return SelectVLDDup(N, true, 4, Opcodes);
2568 }
2570 case ARMISD::VLD1_UPD: {
2571 unsigned DOpcodes[] = { ARM::VLD1d8_UPD, ARM::VLD1d16_UPD,
2572 ARM::VLD1d32_UPD, ARM::VLD1d64_UPD };
2573 unsigned QOpcodes[] = { ARM::VLD1q8Pseudo_UPD, ARM::VLD1q16Pseudo_UPD,
2574 ARM::VLD1q32Pseudo_UPD, ARM::VLD1q64Pseudo_UPD };
2575 return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0);
2576 }
2578 case ARMISD::VLD2_UPD: {
2579 unsigned DOpcodes[] = { ARM::VLD2d8Pseudo_UPD, ARM::VLD2d16Pseudo_UPD,
2580 ARM::VLD2d32Pseudo_UPD, ARM::VLD1q64Pseudo_UPD };
2581 unsigned QOpcodes[] = { ARM::VLD2q8Pseudo_UPD, ARM::VLD2q16Pseudo_UPD,
2582 ARM::VLD2q32Pseudo_UPD };
2583 return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0);
2584 }
2586 case ARMISD::VLD3_UPD: {
2587 unsigned DOpcodes[] = { ARM::VLD3d8Pseudo_UPD, ARM::VLD3d16Pseudo_UPD,
2588 ARM::VLD3d32Pseudo_UPD, ARM::VLD1d64TPseudo_UPD };
2589 unsigned QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
2590 ARM::VLD3q16Pseudo_UPD,
2591 ARM::VLD3q32Pseudo_UPD };
2592 unsigned QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
2593 ARM::VLD3q16oddPseudo_UPD,
2594 ARM::VLD3q32oddPseudo_UPD };
2595 return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2596 }
2598 case ARMISD::VLD4_UPD: {
2599 unsigned DOpcodes[] = { ARM::VLD4d8Pseudo_UPD, ARM::VLD4d16Pseudo_UPD,
2600 ARM::VLD4d32Pseudo_UPD, ARM::VLD1d64QPseudo_UPD };
2601 unsigned QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
2602 ARM::VLD4q16Pseudo_UPD,
2603 ARM::VLD4q32Pseudo_UPD };
2604 unsigned QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
2605 ARM::VLD4q16oddPseudo_UPD,
2606 ARM::VLD4q32oddPseudo_UPD };
2607 return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2608 }
2610 case ARMISD::VLD2LN_UPD: {
2611 unsigned DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD, ARM::VLD2LNd16Pseudo_UPD,
2612 ARM::VLD2LNd32Pseudo_UPD };
2613 unsigned QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
2614 ARM::VLD2LNq32Pseudo_UPD };
2615 return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
2616 }
2618 case ARMISD::VLD3LN_UPD: {
2619 unsigned DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD, ARM::VLD3LNd16Pseudo_UPD,
2620 ARM::VLD3LNd32Pseudo_UPD };
2621 unsigned QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
2622 ARM::VLD3LNq32Pseudo_UPD };
2623 return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
2624 }
2626 case ARMISD::VLD4LN_UPD: {
2627 unsigned DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD, ARM::VLD4LNd16Pseudo_UPD,
2628 ARM::VLD4LNd32Pseudo_UPD };
2629 unsigned QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
2630 ARM::VLD4LNq32Pseudo_UPD };
2631 return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
2632 }
2634 case ARMISD::VST1_UPD: {
2635 unsigned DOpcodes[] = { ARM::VST1d8_UPD, ARM::VST1d16_UPD,
2636 ARM::VST1d32_UPD, ARM::VST1d64_UPD };
2637 unsigned QOpcodes[] = { ARM::VST1q8Pseudo_UPD, ARM::VST1q16Pseudo_UPD,
2638 ARM::VST1q32Pseudo_UPD, ARM::VST1q64Pseudo_UPD };
2639 return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0);
2640 }
2642 case ARMISD::VST2_UPD: {
2643 unsigned DOpcodes[] = { ARM::VST2d8Pseudo_UPD, ARM::VST2d16Pseudo_UPD,
2644 ARM::VST2d32Pseudo_UPD, ARM::VST1q64Pseudo_UPD };
2645 unsigned QOpcodes[] = { ARM::VST2q8Pseudo_UPD, ARM::VST2q16Pseudo_UPD,
2646 ARM::VST2q32Pseudo_UPD };
2647 return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0);
2648 }
2650 case ARMISD::VST3_UPD: {
2651 unsigned DOpcodes[] = { ARM::VST3d8Pseudo_UPD, ARM::VST3d16Pseudo_UPD,
2652 ARM::VST3d32Pseudo_UPD, ARM::VST1d64TPseudo_UPD };
2653 unsigned QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
2654 ARM::VST3q16Pseudo_UPD,
2655 ARM::VST3q32Pseudo_UPD };
2656 unsigned QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
2657 ARM::VST3q16oddPseudo_UPD,
2658 ARM::VST3q32oddPseudo_UPD };
2659 return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2660 }
2662 case ARMISD::VST4_UPD: {
2663 unsigned DOpcodes[] = { ARM::VST4d8Pseudo_UPD, ARM::VST4d16Pseudo_UPD,
2664 ARM::VST4d32Pseudo_UPD, ARM::VST1d64QPseudo_UPD };
2665 unsigned QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
2666 ARM::VST4q16Pseudo_UPD,
2667 ARM::VST4q32Pseudo_UPD };
2668 unsigned QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
2669 ARM::VST4q16oddPseudo_UPD,
2670 ARM::VST4q32oddPseudo_UPD };
2671 return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2672 }
2674 case ARMISD::VST2LN_UPD: {
2675 unsigned DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD, ARM::VST2LNd16Pseudo_UPD,
2676 ARM::VST2LNd32Pseudo_UPD };
2677 unsigned QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
2678 ARM::VST2LNq32Pseudo_UPD };
2679 return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
2680 }
2682 case ARMISD::VST3LN_UPD: {
2683 unsigned DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD, ARM::VST3LNd16Pseudo_UPD,
2684 ARM::VST3LNd32Pseudo_UPD };
2685 unsigned QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
2686 ARM::VST3LNq32Pseudo_UPD };
2687 return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
2688 }
2690 case ARMISD::VST4LN_UPD: {
2691 unsigned DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD, ARM::VST4LNd16Pseudo_UPD,
2692 ARM::VST4LNd32Pseudo_UPD };
2693 unsigned QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
2694 ARM::VST4LNq32Pseudo_UPD };
2695 return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
2696 }
2698 case ISD::INTRINSIC_VOID:
2699 case ISD::INTRINSIC_W_CHAIN: {
2700 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2701 switch (IntNo) {
2702 default:
2703 break;
2705 case Intrinsic::arm_ldrexd: {
2706 SDValue MemAddr = N->getOperand(2);
2707 DebugLoc dl = N->getDebugLoc();
2708 SDValue Chain = N->getOperand(0);
2710 unsigned NewOpc = ARM::LDREXD;
2711 if (Subtarget->isThumb() && Subtarget->hasThumb2())
2712 NewOpc = ARM::t2LDREXD;
2714 // arm_ldrexd returns an i64 value in {i32, i32}
2715 std::vector<EVT> ResTys;
2716 ResTys.push_back(MVT::i32);
2717 ResTys.push_back(MVT::i32);
2718 ResTys.push_back(MVT::Other);
2720 // Place the arguments in the right order.
2721 SmallVector<SDValue, 7> Ops;
2722 Ops.push_back(MemAddr);
2723 Ops.push_back(getAL(CurDAG));
2724 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2725 Ops.push_back(Chain);
2726 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(),
2727 Ops.size());
2728 // Transfer memoperands.
2729 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2730 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2731 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2733 // Until there's support for specifying explicit register constraints
2734 // like the use of an even/odd register pair, hardcode ldrexd to always
2735 // use the pair [R0, R1] to hold the load result.
2736 Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ARM::R0,
2737 SDValue(Ld, 0), SDValue(0,0));
2738 Chain = CurDAG->getCopyToReg(Chain, dl, ARM::R1,
2739 SDValue(Ld, 1), Chain.getValue(1));
2741 // Remap uses.
2742 SDValue Glue = Chain.getValue(1);
2743 if (!SDValue(N, 0).use_empty()) {
2744 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2745 ARM::R0, MVT::i32, Glue);
2746 Glue = Result.getValue(2);
2747 ReplaceUses(SDValue(N, 0), Result);
2748 }
2749 if (!SDValue(N, 1).use_empty()) {
2750 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2751 ARM::R1, MVT::i32, Glue);
2752 Glue = Result.getValue(2);
2753 ReplaceUses(SDValue(N, 1), Result);
2754 }
2756 ReplaceUses(SDValue(N, 2), SDValue(Ld, 2));
2757 return NULL;
2758 }
2760 case Intrinsic::arm_strexd: {
2761 DebugLoc dl = N->getDebugLoc();
2762 SDValue Chain = N->getOperand(0);
2763 SDValue Val0 = N->getOperand(2);
2764 SDValue Val1 = N->getOperand(3);
2765 SDValue MemAddr = N->getOperand(4);
2767 // Until there's support for specifying explicit register constraints
2768 // like the use of an even/odd register pair, hardcode strexd to always
2769 // use the pair [R2, R3] to hold the i64 (i32, i32) value to be stored.
2770 Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ARM::R2, Val0,
2771 SDValue(0, 0));
2772 Chain = CurDAG->getCopyToReg(Chain, dl, ARM::R3, Val1, Chain.getValue(1));
2774 SDValue Glue = Chain.getValue(1);
2775 Val0 = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2776 ARM::R2, MVT::i32, Glue);
2777 Glue = Val0.getValue(1);
2778 Val1 = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2779 ARM::R3, MVT::i32, Glue);
2781 // A store-exclusive double returns an i32 value which is the status
2782 // of the issued store.
2783 std::vector<EVT> ResTys;
2784 ResTys.push_back(MVT::i32);
2785 ResTys.push_back(MVT::Other);
2787 // Place the arguments in the right order.
2788 SmallVector<SDValue, 7> Ops;
2789 Ops.push_back(Val0);
2790 Ops.push_back(Val1);
2791 Ops.push_back(MemAddr);
2792 Ops.push_back(getAL(CurDAG));
2793 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2794 Ops.push_back(Chain);
2796 unsigned NewOpc = ARM::STREXD;
2797 if (Subtarget->isThumb() && Subtarget->hasThumb2())
2798 NewOpc = ARM::t2STREXD;
2800 SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(),
2801 Ops.size());
2802 // Transfer memoperands.
2803 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2804 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2805 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2807 return St;
2808 }
2810 case Intrinsic::arm_neon_vld1: {
2811 unsigned DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
2812 ARM::VLD1d32, ARM::VLD1d64 };
2813 unsigned QOpcodes[] = { ARM::VLD1q8Pseudo, ARM::VLD1q16Pseudo,
2814 ARM::VLD1q32Pseudo, ARM::VLD1q64Pseudo };
2815 return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0);
2816 }
2818 case Intrinsic::arm_neon_vld2: {
2819 unsigned DOpcodes[] = { ARM::VLD2d8Pseudo, ARM::VLD2d16Pseudo,
2820 ARM::VLD2d32Pseudo, ARM::VLD1q64Pseudo };
2821 unsigned QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
2822 ARM::VLD2q32Pseudo };
2823 return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0);
2824 }
2826 case Intrinsic::arm_neon_vld3: {
2827 unsigned DOpcodes[] = { ARM::VLD3d8Pseudo, ARM::VLD3d16Pseudo,
2828 ARM::VLD3d32Pseudo, ARM::VLD1d64TPseudo };
2829 unsigned QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
2830 ARM::VLD3q16Pseudo_UPD,
2831 ARM::VLD3q32Pseudo_UPD };
2832 unsigned QOpcodes1[] = { ARM::VLD3q8oddPseudo,
2833 ARM::VLD3q16oddPseudo,
2834 ARM::VLD3q32oddPseudo };
2835 return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
2836 }
2838 case Intrinsic::arm_neon_vld4: {
2839 unsigned DOpcodes[] = { ARM::VLD4d8Pseudo, ARM::VLD4d16Pseudo,
2840 ARM::VLD4d32Pseudo, ARM::VLD1d64QPseudo };
2841 unsigned QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
2842 ARM::VLD4q16Pseudo_UPD,
2843 ARM::VLD4q32Pseudo_UPD };
2844 unsigned QOpcodes1[] = { ARM::VLD4q8oddPseudo,
2845 ARM::VLD4q16oddPseudo,
2846 ARM::VLD4q32oddPseudo };
2847 return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
2848 }
2850 case Intrinsic::arm_neon_vld2lane: {
2851 unsigned DOpcodes[] = { ARM::VLD2LNd8Pseudo, ARM::VLD2LNd16Pseudo,
2852 ARM::VLD2LNd32Pseudo };
2853 unsigned QOpcodes[] = { ARM::VLD2LNq16Pseudo, ARM::VLD2LNq32Pseudo };
2854 return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
2855 }
2857 case Intrinsic::arm_neon_vld3lane: {
2858 unsigned DOpcodes[] = { ARM::VLD3LNd8Pseudo, ARM::VLD3LNd16Pseudo,
2859 ARM::VLD3LNd32Pseudo };
2860 unsigned QOpcodes[] = { ARM::VLD3LNq16Pseudo, ARM::VLD3LNq32Pseudo };
2861 return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
2862 }
2864 case Intrinsic::arm_neon_vld4lane: {
2865 unsigned DOpcodes[] = { ARM::VLD4LNd8Pseudo, ARM::VLD4LNd16Pseudo,
2866 ARM::VLD4LNd32Pseudo };
2867 unsigned QOpcodes[] = { ARM::VLD4LNq16Pseudo, ARM::VLD4LNq32Pseudo };
2868 return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
2869 }
2871 case Intrinsic::arm_neon_vst1: {
2872 unsigned DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
2873 ARM::VST1d32, ARM::VST1d64 };
2874 unsigned QOpcodes[] = { ARM::VST1q8Pseudo, ARM::VST1q16Pseudo,
2875 ARM::VST1q32Pseudo, ARM::VST1q64Pseudo };
2876 return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0);
2877 }
2879 case Intrinsic::arm_neon_vst2: {
2880 unsigned DOpcodes[] = { ARM::VST2d8Pseudo, ARM::VST2d16Pseudo,
2881 ARM::VST2d32Pseudo, ARM::VST1q64Pseudo };
2882 unsigned QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
2883 ARM::VST2q32Pseudo };
2884 return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0);
2885 }
2887 case Intrinsic::arm_neon_vst3: {
2888 unsigned DOpcodes[] = { ARM::VST3d8Pseudo, ARM::VST3d16Pseudo,
2889 ARM::VST3d32Pseudo, ARM::VST1d64TPseudo };
2890 unsigned QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
2891 ARM::VST3q16Pseudo_UPD,
2892 ARM::VST3q32Pseudo_UPD };
2893 unsigned QOpcodes1[] = { ARM::VST3q8oddPseudo,
2894 ARM::VST3q16oddPseudo,
2895 ARM::VST3q32oddPseudo };
2896 return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
2897 }
2899 case Intrinsic::arm_neon_vst4: {
2900 unsigned DOpcodes[] = { ARM::VST4d8Pseudo, ARM::VST4d16Pseudo,
2901 ARM::VST4d32Pseudo, ARM::VST1d64QPseudo };
2902 unsigned QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
2903 ARM::VST4q16Pseudo_UPD,
2904 ARM::VST4q32Pseudo_UPD };
2905 unsigned QOpcodes1[] = { ARM::VST4q8oddPseudo,
2906 ARM::VST4q16oddPseudo,
2907 ARM::VST4q32oddPseudo };
2908 return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
2909 }
2911 case Intrinsic::arm_neon_vst2lane: {
2912 unsigned DOpcodes[] = { ARM::VST2LNd8Pseudo, ARM::VST2LNd16Pseudo,
2913 ARM::VST2LNd32Pseudo };
2914 unsigned QOpcodes[] = { ARM::VST2LNq16Pseudo, ARM::VST2LNq32Pseudo };
2915 return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
2916 }
2918 case Intrinsic::arm_neon_vst3lane: {
2919 unsigned DOpcodes[] = { ARM::VST3LNd8Pseudo, ARM::VST3LNd16Pseudo,
2920 ARM::VST3LNd32Pseudo };
2921 unsigned QOpcodes[] = { ARM::VST3LNq16Pseudo, ARM::VST3LNq32Pseudo };
2922 return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
2923 }
2925 case Intrinsic::arm_neon_vst4lane: {
2926 unsigned DOpcodes[] = { ARM::VST4LNd8Pseudo, ARM::VST4LNd16Pseudo,
2927 ARM::VST4LNd32Pseudo };
2928 unsigned QOpcodes[] = { ARM::VST4LNq16Pseudo, ARM::VST4LNq32Pseudo };
2929 return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
2930 }
2931 }
2932 break;
2933 }
2935 case ISD::INTRINSIC_WO_CHAIN: {
2936 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
2937 switch (IntNo) {
2938 default:
2939 break;
2941 case Intrinsic::arm_neon_vtbl2:
2942 return SelectVTBL(N, false, 2, ARM::VTBL2Pseudo);
2943 case Intrinsic::arm_neon_vtbl3:
2944 return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
2945 case Intrinsic::arm_neon_vtbl4:
2946 return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
2948 case Intrinsic::arm_neon_vtbx2:
2949 return SelectVTBL(N, true, 2, ARM::VTBX2Pseudo);
2950 case Intrinsic::arm_neon_vtbx3:
2951 return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
2952 case Intrinsic::arm_neon_vtbx4:
2953 return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
2954 }
2955 break;
2956 }
2958 case ARMISD::VTBL1: {
2959 DebugLoc dl = N->getDebugLoc();
2960 EVT VT = N->getValueType(0);
2961 SmallVector<SDValue, 6> Ops;
2963 Ops.push_back(N->getOperand(0));
2964 Ops.push_back(N->getOperand(1));
2965 Ops.push_back(getAL(CurDAG)); // Predicate
2966 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
2967 return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops.data(), Ops.size());
2968 }
2969 case ARMISD::VTBL2: {
2970 DebugLoc dl = N->getDebugLoc();
2971 EVT VT = N->getValueType(0);
2973 // Form a REG_SEQUENCE to force register allocation.
2974 SDValue V0 = N->getOperand(0);
2975 SDValue V1 = N->getOperand(1);
2976 SDValue RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);
2978 SmallVector<SDValue, 6> Ops;
2979 Ops.push_back(RegSeq);
2980 Ops.push_back(N->getOperand(2));
2981 Ops.push_back(getAL(CurDAG)); // Predicate
2982 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
2983 return CurDAG->getMachineNode(ARM::VTBL2Pseudo, dl, VT,
2984 Ops.data(), Ops.size());
2985 }
2987 case ISD::CONCAT_VECTORS:
2988 return SelectConcatVector(N);
2989 }
2991 return SelectCode(N);
2992 }
2994 bool ARMDAGToDAGISel::
2995 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
2996 std::vector<SDValue> &OutOps) {
2997 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
2998 // Require the address to be in a register. That is safe for all ARM
2999 // variants and it is hard to do anything much smarter without knowing
3000 // how the operand is used.
3001 OutOps.push_back(Op);
3002 return false;
3003 }
3005 /// createARMISelDag - This pass converts a legalized DAG into an
3006 /// ARM-specific DAG, ready for instruction scheduling.
3008 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
3009 CodeGenOpt::Level OptLevel) {
3010 return new ARMDAGToDAGISel(TM, OptLevel);
3011 }