//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call using a branch, not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.
      CNEG,         // ARM conditional negate instructions.

      BCC_i64,      // 64-bit conditional branch; expanded into a pair of
                    // 32-bit compares and branches.

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,        // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,       // SjLj exception handling longjmp.
      EH_SJLJ_DISPATCHSETUP, // SjLj exception handling dispatch setup.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER, // Read the thread pointer (TLS base address).

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER,     // Memory barrier (DMB)
      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload

      VCEQ,         // Vector compare equal.
      VCGE,         // Vector compare greater than or equal.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,

      // Bit-field insert
      BFI
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    /// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be
    /// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd)
    /// instruction, returns its 8-bit integer representation. Otherwise,
    /// returns -1.
    int getVFPf32Imm(const APFloat &FPImm);
    int getVFPf64Imm(const APFloat &FPImm);
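    // Illustrative note (not in the original header; values assumed from the
    // VFP modified-immediate encoding): +1.0 is representable and encodes as
    // 0x70, so getVFPf32Imm(APFloat(1.0f)) should return 0x70, while a value
    // with no 8-bit encoding, such as 0.3, yields -1.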
    /// isBitFieldInvertedMask - Returns true if the given value is the
    /// complement of a contiguous bit-field mask (used to form BFI / BFC).
    bool isBitFieldInvertedMask(unsigned v);
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;
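    // Illustrative note (behaviour assumed from the ARM backend of this era,
    // not stated in this header): on an ARMv7 subtarget that permits
    // unaligned accesses, this returns true for i16 and i32, while vector
    // types still report false and keep their natural alignment requirement.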

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM,
                                       const Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
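    // Illustrative note (example assumed, not from the original header): for
    // an i32 load, an AddrMode with HasBaseReg == true and Scale == 4 models
    // "ldr r0, [r1, r2, lsl #2]", which ARM mode supports, so the query
    // would return true; the same mode with Scale == 3 has no encoding.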

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, i.e. the target has icmp instructions which can
    /// compare a register against the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;
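    // Illustrative note (examples assumed from the ARM modified-immediate
    // rules, not from this header): in ARM mode a compare immediate is an
    // 8-bit value rotated right by an even amount, so 255 and 0xff00 are
    // legal while 257 is not and must first be materialized in a register.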

    /// getPreIndexedAddressParts - Returns true, and sets the base pointer,
    /// offset pointer and addressing mode by reference, if the node's address
    /// can be legally represented as a pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - Returns true, and sets the base pointer,
    /// offset pointer and addressing mode by reference, if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;
    std::vector<unsigned>
      getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                        EVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true, it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual TargetRegisterClass *getRegClassFor(EVT VT) const;

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                                 MachineFunction &MF) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                                    const CallInst &I,
                                    unsigned Intrinsic) const;
  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
      findRepresentativeClass(EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
    void addDRTypeForNEON(EVT VT);
    void addQRTypeForNEON(EVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 DebugLoc dl) const;

    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg,
                bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, DebugLoc dl) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
  };

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
}

#endif  // ARMISELLOWERING_H