//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H

#include "X86Subtarget.h"
#include "X86RegisterInfo.h"
#include "X86MachineFunctionInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"

namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.
      BSF,
      BSR,

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// FAND - Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.
      FXOR,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,

      /// CALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of these nodes are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
      ///
      CALL,

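      // A hedged sketch (not part of the original header) of how lowering code
      // might build this node with the operand/result layout above; the VT
      // list and helpers shown are assumptions that vary by LLVM revision:
      //   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
      //   SDValue Ops[] = { Chain, Callee /*, register copies, glue ... */ };
      //   Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops,
      //                       array_lengthof(Ops));
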
      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter
      RDTSC_DAG,

      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 bit-test instructions.
      BT,

      /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
      /// operand, usually produced by a CMP instruction.
      SETCC,

      // Same as SETCC except it's materialized with an SBB and the value is all
      // ones or all zeros.
      SETCC_CARRY,  // R = carry_bit ? ~0 : 0

      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction. It also writes a
      /// flag result.
      CMOV,

      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch if condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,

      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.
      RET_FLAG,

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// GlobalBaseReg - On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// MOVQ2DQ - Copies a 64-bit value from an MMX vector to the low word
      /// of an XMM vector, with the high word zero filled.
      MOVQ2DQ,

      /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector. If you think this is too close to the previous
      /// mnemonic, so do I; blame Intel.
      MOVDQ2Q,

      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// PINSRB - Insert the lower 8 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// PINSRW - Insert the lower 16 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRW.
      PINSRW, MMX_PINSRW,

      /// PSHUFB - Shuffle 16 8-bit values within a vector.
      PSHUFB,

      /// PANDN - Bitwise AND with a NOT'd value.
      PANDN,

      /// PSIGNB/W/D - Copy integer sign.
      PSIGNB, PSIGNW, PSIGND,

      /// PBLENDVB - Variable blend
      PBLENDVB,

      /// FMAX, FMIN - Floating point max and min.
      FMAX, FMIN,

      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation. Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,

      // TLSADDR - Thread Local Storage.
      TLSADDR,

      // TLSCALL - Thread Local Storage. A call to an OS-provided thunk at the
      // address from an earlier relocation.
      TLSCALL,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      /// TC_RETURN - Tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      // VZEXT_MOVL - Vector move low and zero extend.
      VZEXT_MOVL,

      // VSHL, VSRL - Vector logical left / right shift.
      VSHL, VSRL,

      // CMPPD, CMPPS - Vector double/float comparison.
      CMPPD, CMPPS,

      // PCMP* - Vector integer comparisons.
      PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
      PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,

      // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL,
      INC, DEC, OR, XOR, AND,

      UMUL, // LOW, HI, FLAGS = umul LHS, RHS

      // MUL_IMM - X86 specific multiply by immediate.
      MUL_IMM,

      // PTEST - Vector bitwise comparisons
      PTEST,

      // TESTP - Vector packed fp sign bitwise comparisons
      TESTP,

      // Several flavors of instructions with vector shuffle behaviors.
      PALIGN,
      PSHUFD,
      PSHUFHW,
      PSHUFLW,
      PSHUFHW_LD,
      PSHUFLW_LD,
      SHUFPD,
      SHUFPS,
      MOVDDUP,
      MOVSHDUP,
      MOVSLDUP,
      MOVSHDUP_LD,
      MOVSLDUP_LD,
      MOVLHPS,
      MOVLHPD,
      MOVHLPS,
      MOVHLPD,
      MOVLPS,
      MOVLPD,
      MOVSD,
      MOVSS,
      UNPCKLPS,
      UNPCKLPD,
      VUNPCKLPS,
      VUNPCKLPD,
      VUNPCKLPSY,
      VUNPCKLPDY,
      UNPCKHPS,
      UNPCKHPD,
      PUNPCKLBW,
      PUNPCKLWD,
      PUNPCKLDQ,
      PUNPCKLQDQ,
      PUNPCKHBW,
      PUNPCKHWD,
      PUNPCKHDQ,
      PUNPCKHQDQ,

      // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
      // according to %al. An operator is needed so that this can be expanded
      // with control flow.
      VASTART_SAVE_XMM_REGS,

      // WIN_ALLOCA - Windows's _chkstk call to do stack probing.
      WIN_ALLOCA,

      // Memory barrier
      MEMBARRIER,
      MFENCE,
      SFENCE,
      LFENCE,

      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
      // Atomic 64-bit binary operations.
      ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMXOR64_DAG,
      ATOMAND64_DAG,
      ATOMNAND64_DAG,
      ATOMSWAP64_DAG,

      // LCMPXCHG_DAG, LCMPXCHG8_DAG - Compare and swap.
      LCMPXCHG_DAG,
      LCMPXCHG8_DAG,

      // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,

      // FNSTCW16m - Store FP control word into i16 memory.
      FNSTCW16m,

      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff. It
      /// has two inputs (token chain and address) and two outputs (int value
      /// and token chain).
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,

      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain). FILD_FLAG
      /// also produces a flag.
      FILD,
      FILD_FLAG,

      /// FLD - This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
      /// operand, ptr to load from, and a ValueType node indicating the type
      /// to load to.
      FLD,

      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
      /// chain operand, value to store, address, and a ValueType to store it
      /// as.
      FST,

      /// VAARG_64 - This instruction grabs the address of the next argument
      /// from a va_list. (reads and modifies the va_list in memory)
      VAARG_64

      // WARNING: Do not add anything in the end unless you want the node to
      // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
      // treated as target memory ops!
    };
  }

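  // Minimal illustrative check (an assumption, not part of this header):
  // common SelectionDAG code treats any opcode at or above
  // ISD::FIRST_TARGET_MEMORY_OPCODE as a memory-touching target node, roughly
  //   bool IsTargetMemOp = N->getOpcode() >= ISD::FIRST_TARGET_MEMORY_OPCODE;
  // which is why ATOMADD64_DAG and everything after it must stay last.
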
  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFD.
    bool isPSHUFDMask(ShuffleVectorSDNode *N);

    /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFHW.
    bool isPSHUFHWMask(ShuffleVectorSDNode *N);

    /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFLW.
    bool isPSHUFLWMask(ShuffleVectorSDNode *N);

    /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to SHUFP*.
    bool isSHUFPMask(ShuffleVectorSDNode *N);

    /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
    bool isMOVHLPSMask(ShuffleVectorSDNode *N);

    /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
    /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
    /// <2, 3, 2, 3>
    bool isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVLP{S|D}.
    bool isMOVLPMask(ShuffleVectorSDNode *N);

    /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVHP{S|D}
    /// as well as MOVLHPS.
    bool isMOVLHPSMask(ShuffleVectorSDNode *N);

    /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKL.
    bool isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat = false);

    /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKH.
    bool isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat = false);

    /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
    /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
    /// <0, 0, 1, 1>
    bool isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
    /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
    /// <2, 2, 3, 3>
    bool isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N);

    /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSS,
    /// MOVSD, and MOVD, i.e. setting the lowest element.
    bool isMOVLMask(ShuffleVectorSDNode *N);

    /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
    bool isMOVSHDUPMask(ShuffleVectorSDNode *N);

    /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
    bool isMOVSLDUPMask(ShuffleVectorSDNode *N);

    /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVDDUP.
    bool isMOVDDUPMask(ShuffleVectorSDNode *N);

    /// isPALIGNRMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PALIGNR.
    bool isPALIGNRMask(ShuffleVectorSDNode *N);

    /// isVEXTRACTF128Index - Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF128.
    bool isVEXTRACTF128Index(SDNode *N);

    /// isVINSERTF128Index - Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF128.
    bool isVINSERTF128Index(SDNode *N);

    /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
    /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
    /// instructions.
    unsigned getShuffleSHUFImmediate(SDNode *N);

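    /// For illustration only (hypothetical surrounding code, not part of the
    /// original header): lowering is expected to pair the predicates above
    /// with this helper, roughly
    ///   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op.getNode());
    ///   if (X86::isPSHUFDMask(SVOp))
    ///     Imm8 = X86::getShuffleSHUFImmediate(SVOp); // becomes PSHUFD's imm8
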
    /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
    unsigned getShufflePSHUFHWImmediate(SDNode *N);

    /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
    unsigned getShufflePSHUFLWImmediate(SDNode *N);

    /// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
    unsigned getShufflePALIGNRImmediate(SDNode *N);

    /// getExtractVEXTRACTF128Immediate - Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF128 instructions.
    unsigned getExtractVEXTRACTF128Immediate(SDNode *N);

    /// getInsertVINSERTF128Immediate - Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF128 instructions.
    unsigned getInsertVINSERTF128Immediate(SDNode *N);

    /// isZeroNode - Returns true if Elt is a constant zero or a floating point
    /// constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// isOffsetSuitableForCodeModel - Returns true if the given offset can
    /// fit into the displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);
  }

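  // Usage sketch (illustrative, not from this header): under the small code
  // model a global's constant offset is only folded into an x86 address if it
  // still fits the signed 32-bit displacement, e.g.
  //   if (X86::isOffsetSuitableForCodeModel(Offset, CodeModel::Small))
  //     ...fold GV+Offset into the addressing mode...
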
  //===--------------------------------------------------------------------===//
  //  X86TargetLowering - X86 Implementation of the TargetLowering interface
  class X86TargetLowering : public TargetLowering {
  public:
    explicit X86TargetLowering(X86TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i8; }

    virtual const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const;

    /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
    /// jumptable.
    virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                             SelectionDAG &DAG) const;
    virtual const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const;

    /// getStackPtrReg - Return the stack pointer register we are using: either
    /// ESP or RSP.
    unsigned getStackPtrReg() const { return X86StackPtr; }

    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. For X86, aggregates
    /// that contain SSE vectors are placed at 16-byte boundaries while the rest
    /// are at 4-byte boundaries.
    virtual unsigned getByValTypeAlignment(const Type *Ty) const;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination alignment can satisfy any
    /// constraint. Similarly, if SrcAlign is zero it means there isn't a need
    /// to check it against the alignment requirement, probably because the
    /// source does not need to be loaded. If 'NonScalarIntSafe' is true, that
    /// means it's safe to return a non-scalar-integer type, e.g. empty string
    /// source, constant, or loaded from memory. 'MemcpyStrSrc' indicates
    /// whether the memcpy source is constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool NonScalarIntSafe, bool MemcpyStrSrc,
                        MachineFunction &MF) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
      return true;
    }

    /// LowerOperation - Provide custom lowering hooks for some operations.
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    /// isTypeDesirableForOp - Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;

    /// IsDesirableToPromoteOp - Return true if it is beneficial for the DAG
    /// combiner to promote the operation Op to a wider type, returning the
    /// desired type in PVT. e.g. On x86 i16 is legal, but undesirable since
    /// i16 instruction encodings are longer and some i16 instructions are slow.
    virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;

    virtual MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr *MI,
                                MachineBasicBlock *MBB) const;

    /// getTargetNodeName - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    /// getSetCCResultType - Return the ISD::SETCC ValueType
    virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;

    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one and return them in the
    /// KnownZero/KnownOne bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    // ComputeNumSignBitsForTargetNode - Determine the number of bits in the
    // operation that are sign bits.
    virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                     unsigned Depth) const;

    virtual bool
    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::vector<unsigned>
    getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                      EVT VT) const;

    virtual const char *LowerXConstraint(EVT ConstraintVT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;

    /// isZExtFree - Return true if any actual instruction that defines a
    /// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
    /// register. This does not necessarily include registers defined in
    /// unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicitly zero-extend the
    /// result out to 64 bits.
    virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const;
    virtual bool isZExtFree(EVT VT1, EVT VT2) const;

    /// isNarrowingProfitable - Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                                    EVT VT) const;

    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
    /// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
    /// to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                        EVT VT) const;

    /// ShouldShrinkFPConstant - If true, then instruction selection should
    /// seek to shrink the FP constant of the specified type to a smaller type
    /// in order to save space and / or reduce runtime.
    virtual bool ShouldShrinkFPConstant(EVT VT) const {
      // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
      // expensive than a straight movsd. On the other hand, it's important to
      // shrink long double fp constant since fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }

    const X86Subtarget* getSubtarget() const {
      return Subtarget;
    }

    /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
    /// computed in an SSE register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(EVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 uses SSE regs w/ SSE2
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 uses SSE regs w/ SSE1
    }

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

    /// getStackCookieLocation - Return true if the target stores stack
    /// protector cookies at a fixed offset in some non-standard address
    /// space, and populates the address space and offset as
    /// appropriate.
    virtual bool getStackCookieLocation(unsigned &AddressSpace,
                                        unsigned &Offset) const;

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;
    const X86RegisterInfo *RegInfo;
    const TargetData *TD;

    /// X86StackPtr - X86 physical register used as stack ptr.
    unsigned X86StackPtr;

    /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
    /// floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;

    /// LegalFPImmediates - A list of legal fp immediates.
    std::vector<APFloat> LegalFPImmediates;

    /// addLegalFPImmediate - Indicate that this x86 target can instruction
    /// select the specified FP immediate natively.
    void addLegalFPImmediate(const APFloat& Imm) {
      LegalFPImmediates.push_back(Imm);
    }

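    // Illustrative call (an assumption; such registrations are expected in
    // the target's constructor rather than in this header):
    //   addLegalFPImmediate(APFloat(+0.0)); // e.g. materializable with xorps
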
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerMemArgument(SDValue Chain,
                             CallingConv::ID CallConv,
                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo *MFI,
                             unsigned i) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;

    // Call lowering helpers.

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall, bool Is64Bit,
                                    int FPDiff, DebugLoc dl) const;

    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;

    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool isSigned) const;

    SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                   SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                      SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
                      DebugLoc dl, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;

    // Utility functions to help LowerVECTOR_SHUFFLE
    SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const;

    virtual SDValue
    LowerFormalArguments(SDValue Chain,
                         CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         DebugLoc dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const;
    virtual SDValue
    LowerCall(SDValue Chain, SDValue Callee,
              CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
              const SmallVectorImpl<ISD::OutputArg> &Outs,
              const SmallVectorImpl<SDValue> &OutVals,
              const SmallVectorImpl<ISD::InputArg> &Ins,
              DebugLoc dl, SelectionDAG &DAG,
              SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
    LowerReturn(SDValue Chain,
                CallingConv::ID CallConv, bool isVarArg,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                DebugLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    virtual EVT
    getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
                             ISD::NodeType ExtendKind) const;

    virtual bool
    CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                   LLVMContext &Context) const;

    void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG, unsigned NewOp) const;

    /// Utility function to emit string processing SSE4.2 instructions
    /// that return in xmm0.
    /// This takes the instruction to expand, the associated machine basic
    /// block, the number of args, and whether the second arg is in memory.
    MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
                                unsigned argNum, bool inMem) const;

    /// Utility functions to emit monitor and mwait instructions. These
    /// need to make sure that the arguments to the intrinsic are in the
    /// correct registers.
    MachineBasicBlock *EmitMonitor(MachineInstr *MI,
                                   MachineBasicBlock *BB) const;
    MachineBasicBlock *EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const;

    /// Utility function to emit atomic bitwise operations (and, or, xor).
    /// It takes the bitwise instruction to expand, the associated machine basic
    /// block, and the associated X86 opcodes for reg/reg and reg/imm.
    MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpc,
                                                    unsigned immOpc,
                                                    unsigned loadOpc,
                                                    unsigned cxchgOpc,
                                                    unsigned notOpc,
                                                    unsigned EAXreg,
                                                    TargetRegisterClass *RC,
                                                    bool invSrc = false) const;

    MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpcL,
                                                    unsigned regOpcH,
                                                    unsigned immOpcL,
                                                    unsigned immOpcH,
                                                    bool invSrc = false) const;

    /// Utility function to emit atomic min and max. It takes the min/max
    /// instruction to expand, the associated basic block, and the associated
    /// cmov opcode for moving the min or max value.
    MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr,
                                                          MachineBasicBlock *BB,
                                                        unsigned cmovOpc) const;

    // Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *EmitVAARG64WithCustomInserter(
                       MachineInstr *MI,
                       MachineBasicBlock *MBB) const;

    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                    SelectionDAG &DAG) const;
  };

  namespace X86 {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
}

#endif    // X86ISELLOWERING_H