//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
#define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H

#include "PPCInstrInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"

namespace llvm {

namespace PPCISD {

// When adding a NEW PPCISD node please add it to the correct position in
// the enum. The order of elements in this enum matters!
// Values that are added between FIRST_MEMORY_OPCODE and LAST_MEMORY_OPCODE
// are considered memory opcodes and are treated differently than other
// nodes.
enum NodeType : unsigned {
  // Start the numbering where the builtin ops and target ops leave off.
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  /// FSEL - Traditional three-operand fsel node.

  /// XSMAXC[DQ]P, XSMINC[DQ]P - C-type min/max instructions.

  /// FCFID - The FCFID instruction, taking an f64 operand and producing
  /// an f64 value containing the FP representation of the integer that
  /// was temporarily in the f64 operand.

  /// Newer FCFID[US] integer-to-floating-point conversion instructions for
  /// unsigned integers and single-precision outputs.

  /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
  /// operand, producing an f64 value containing the integer representation
  /// of that FP value.

  /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for
  /// unsigned integers with round toward zero.

  /// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in
  /// VSFRC that is sign-extended from ByteWidth to a 64-bit integer.

  /// Reciprocal estimate instructions (unary FP ops).

  /// Test instruction for software square root.

  /// Square root instruction.

  /// VPERM - The PPC VPERM Instruction.

  /// XXSPLT - The PPC VSX splat instructions

  /// XXSPLTI_SP_TO_DP - The PPC VSX splat instructions for immediates for
  /// converting immediate single precision numbers to double precision
  /// vector or scalar.

  /// XXSPLTI32DX - The PPC XXSPLTI32DX instruction.

  /// VECINSERT - The PPC vector insert instruction

  /// VECSHL - The PPC vector shift left instruction

  /// XXPERMDI - The PPC XXPERMDI instruction

  /// The CMPB instruction (takes two operands of i32 or i64).

  /// Hi/Lo - These represent the high and low 16-bit parts of a global
  /// address respectively. These nodes have two operands, the first of
  /// which must be a TargetGlobalAddress, and the second of which must be a
  /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
  /// though these are usually folded into other nodes.

  /// The following two target-specific nodes are used for calls through
  /// function pointers in the 64-bit SVR4 ABI.

  /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
  /// compute an allocation on the stack.

  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
  /// compute an offset from native SP to the address of the most recent
  /// dynamic alloca.

  /// To avoid stack clash, allocation is performed by block and each block is
  /// probed.

  /// The result of the mflr at function entry, used for PIC code.

  /// These nodes represent PPC shifts.
  ///
  /// For scalar types, only the last `n + 1` bits of the shift amounts
  /// are used, where n is log2(sizeof(element) * 8). See sld/slw, etc.
  /// for exact behaviors.
  ///
  /// For vector types, only the last n bits are used. See vsld.
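  ///
  /// Illustrative sketch (not part of the original header): for a 64-bit
  /// element, n = log2(64) = 6, so only the low 7 bits of the shift amount
  /// matter. In C terms the scalar behaviour is roughly:
  /// \code
  ///   uint64_t ppc_sld(uint64_t X, unsigned Amt) {
  ///     Amt &= 127;                      // sld reads 7 bits of the amount
  ///     return Amt > 63 ? 0 : X << Amt;  // amounts 64..127 produce zero
  ///   }
  /// \endcode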

  /// FNMSUB - Negated multiply-subtract instruction.

  /// EXTSWSLI = The PPC extswsli instruction, which does an extend-sign
  /// word and shift left immediate.

  /// The combination of sra[wd]i and addze used to implement signed
  /// integer division by a power of 2. The first operand is the dividend,
  /// and the second is the constant shift amount (representing the
  /// log of the divisor).
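  ///
  /// Illustrative sketch (assumed assembly, not taken from this file): an
  /// i32 division by 4 can be emitted as the usual PPC idiom
  /// \code
  ///   srawi r4, r3, 2   ; arithmetic shift right by log2(4), sets CA
  ///   addze r3, r4      ; add the carry to round the negative case toward 0
  /// \endcode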

  /// CALL - A direct function call.
  /// CALL_NOP is a call with the special NOP which follows 64-bit
  /// SVR4 calls and 32-bit/64-bit AIX calls.
  /// CALL_NOTOC - the caller does not use the TOC.

  /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
  /// MTCTR instruction.

  /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
  /// BCTRL instruction.

  /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
  /// instruction and the TOC reload required on 64-bit ELF, 32-bit AIX
  /// and 64-bit AIX.

  /// The variants that implicitly define rounding mode for calls with
  /// strictfp semantics.

  /// Return with a glue operand, matched by 'blr'.

  /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
  /// This copies the bits corresponding to the specified CRREG into the
  /// resultant GPR. Bits corresponding to other CR regs are undefined.

  /// Direct move from a VSX register to a GPR.

  /// Direct move from a GPR to a VSX register (algebraic).

  /// Direct move from a GPR to a VSX register (zero).

  /// Direct move of 2 consecutive GPRs to a VSX register.

  /// BUILD_SPE64 and EXTRACT_SPE are analogous to BUILD_PAIR and
  /// EXTRACT_ELEMENT but take f64 arguments instead of i64, as i64 is
  /// unsupported for this target.
  /// Merge 2 GPRs to a single SPE register.

  /// Extract SPE register component, second argument is high or low.

  /// Extract a subvector from signed integer vector and convert to FP.
  /// It is primarily used to convert a (widened) illegal integer vector
  /// type to a legal floating point vector type.
  /// For example v2i32 -> widened to v4i32 -> v2f64

  /// Extract a subvector from unsigned integer vector and convert to FP.
  /// As with SINT_VEC_TO_FP, used for converting illegal types.

  /// PowerPC instructions that have SCALAR_TO_VECTOR semantics tend to
  /// place the value into the least significant element of the most
  /// significant doubleword in the vector. This is not element zero for
  /// anything smaller than a doubleword on either endianness. This node has
  /// the same semantics as SCALAR_TO_VECTOR except that the value remains in
  /// the aforementioned location in the vector register.
  SCALAR_TO_VECTOR_PERMUTED,

  // FIXME: Remove these once the ANDI glue bug is fixed:
  /// i1 = ANDI_rec_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
  /// eq or gt bit of CR0 after executing andi. x, 1. This is used to
  /// implement truncation of i32 or i64 to i1.

  // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
  // target (returns (Lo, Hi)). It takes a chain operand.

  // EH_SJLJ_SETJMP - SjLj exception handling setjmp.

  // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.

  /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
  /// instructions. For lack of better number, we use the opcode number
  /// encoding for the OPC field to identify the compare. For example, 838
  /// is VCMPGTSH.

  /// RESVEC, OUTFLAG = VCMP_rec(LHS, RHS, OPC) - Represents one of the
  /// altivec VCMP*_rec instructions. For lack of better number, we use the
  /// opcode number encoding for the OPC field to identify the compare. For
  /// example, 838 is VCMPGTSH.

  /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
  /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
  /// condition register to branch on, OPC is the branch opcode to use (e.g.
  /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
  /// an optional input flag argument.

  /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
  /// loops.

  /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
  /// towards zero. Used only as part of the long double-to-int
  /// conversion sequence.

  /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.

  /// TC_RETURN - A tail call return.
  ///   operand #0 chain
  ///   operand #1 callee (register or absolute)
  ///   operand #2 stack adjustment
  ///   operand #3 optional in flag

  /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls

  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
  /// for non-position-independent code on PPC32.

  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
  /// local dynamic TLS and position-independent code on PPC32.

  /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec
  /// TLS model, produces an ADDIS8 instruction that adds the GOT
  /// base to sym\@got\@tprel\@ha.

  /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
  /// TLS model, produces a LD instruction with base register G8RReg
  /// and offset sym\@got\@tprel\@l. This completes the addition that
  /// finds the offset of "sym" relative to the thread pointer.

  /// G8RC = ADD_TLS G8RReg, Symbol - Can be used by the initial-exec
  /// and local-exec TLS models, produces an ADD instruction that adds
  /// the contents of G8RReg to the thread pointer. Symbol contains a
  /// relocation sym\@tls which is to be replaced by the thread pointer
  /// and identifies to the linker that the instruction is part of a
  /// TLS sequence.

  /// G8RC = ADDIS_TLSGD_HA %x2, Symbol - For the general-dynamic TLS
  /// model, produces an ADDIS8 instruction that adds the GOT base
  /// register to sym\@got\@tlsgd\@ha.

  /// %x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
  /// model, produces an ADDI8 instruction that adds G8RReg to
  /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
  /// ADDIS_TLSGD_L_ADDR until after register assignment.

  /// %x3 = GET_TLS_ADDR %x3, Symbol - For the general-dynamic TLS
  /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
  /// ADDIS_TLSGD_L_ADDR until after register assignment.

  /// %x3 = GET_TPOINTER - Used for the local- and initial-exec TLS model on
  /// 32-bit AIX, produces a call to .__get_tpointer to retrieve the thread
  /// pointer. At the end of the call, the thread pointer is found in R3.

  /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
  /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
  /// register assignment.

  /// GPRC = TLSGD_AIX, TOC_ENTRY, TOC_ENTRY
  /// G8RC = TLSGD_AIX, TOC_ENTRY, TOC_ENTRY
  /// Op that combines two register copies of TOC entries
  /// (region handle into R3 and variable offset into R4) followed by a
  /// GET_TLS_ADDR node which will be expanded to a call to .__tls_get_addr.
  /// This node is used in 64-bit mode as well (in which case the result is
  /// G8RC and inputs are X3/X4).

  /// %x3 = GET_TLS_MOD_AIX _$TLSML - For the AIX local-dynamic TLS model,
  /// produces a call to .__tls_get_mod(_$TLSML\@ml).

  /// [GP|G8]RC = TLSLD_AIX, TOC_ENTRY(module handle)
  /// Op that requires a single input of the module handle TOC entry in R3,
  /// and generates a GET_TLS_MOD_AIX node which will be expanded into a call
  /// to .__tls_get_mod. This node is used in both 32-bit and 64-bit modes.
  /// The only difference is the register class.

  /// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS
  /// model, produces an ADDIS8 instruction that adds the GOT base
  /// register to sym\@got\@tlsld\@ha.

  /// %x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
  /// model, produces an ADDI8 instruction that adds G8RReg to
  /// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
  /// ADDIS_TLSLD_L_ADDR until after register assignment.

  /// %x3 = GET_TLSLD_ADDR %x3, Symbol - For the local-dynamic TLS
  /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
  /// ADDIS_TLSLD_L_ADDR until after register assignment.

  /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
  /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
  /// following register assignment.

  /// G8RC = ADDIS_DTPREL_HA %x3, Symbol - For the local-dynamic TLS
  /// model, produces an ADDIS8 instruction that adds X3 to
  /// sym\@dtprel\@ha.

  /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
  /// model, produces an ADDI8 instruction that adds G8RReg to
  /// sym\@got\@dtprel\@l.

  /// G8RC = PADDI_DTPREL %x3, Symbol - For the pc-rel based local-dynamic TLS
  /// model, produces a PADDI8 instruction that adds X3 to sym\@dtprel.

  /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
  /// during instruction selection to optimize a BUILD_VECTOR into
  /// operations on splats. This is necessary to avoid losing these
  /// optimizations due to constant folding.

  /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
  /// operand identifies the operating system entry point.

  /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.

  /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
  /// history rolling buffer entry.

  /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.

  /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
  /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
  /// or stxvd2x instruction. The chain is necessary because the
  /// sequence replaces a load and needs to provide the same number
  /// of outputs.

  /// An SDNode for swaps that are not associated with any loads/stores
  /// and thereby have no chain.

  /// FP_EXTEND_HALF(VECTOR, IDX) - Custom extend upper (IDX=0) half or
  /// lower (IDX=1) half of v4f32 to v2f64.

  /// MAT_PCREL_ADDR = Materialize a PC Relative address. This can be done
  /// either through an add like PADDI or through a PC Relative load like
  /// PLXV.

  /// TLS_DYNAMIC_MAT_PCREL_ADDR = Materialize a PC Relative address for
  /// TLS global address when using dynamic access models. This can be done
  /// through an add like PADDI.
  TLS_DYNAMIC_MAT_PCREL_ADDR,

  /// TLS_LOCAL_EXEC_MAT_ADDR = Materialize an address for TLS global address
  /// when using local exec access models, and when prefixed instructions are
  /// available. This is used with ADD_TLS to produce an add like PADDI.
  TLS_LOCAL_EXEC_MAT_ADDR,

  /// ACC_BUILD = Build an accumulator register from 4 VSX registers.

  /// PAIR_BUILD = Build a vector pair register from 2 VSX registers.

  /// EXTRACT_VSX_REG = Extract one of the underlying vsx registers of
  /// an accumulator or pair register. This node is needed because
  /// EXTRACT_SUBVECTOR expects the input and output vectors to have the same
  /// number of elements.

  /// XXMFACC = This corresponds to the xxmfacc instruction.

  // Constrained conversion from floating point to int
  FIRST_STRICTFP_OPCODE,
  STRICT_FCTIDZ = FIRST_STRICTFP_OPCODE,

  /// Constrained integer-to-floating-point conversion instructions.

  /// Constrained floating point add in round-to-zero mode.
  LAST_STRICTFP_OPCODE = STRICT_FADDRTZ,

  /// SETBC - The ISA 3.1 (P10) SETBC instruction.

  /// SETBCR - The ISA 3.1 (P10) SETBCR instruction.

  // NOTE: The nodes below may require PC-Rel specific patterns if the
  // address could be PC-Relative. When adding new nodes below, consider
  // whether or not the address can be PC-Relative and add the corresponding
  // PC-relative patterns and tests.

  /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
  /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
  /// the GPRC input, then stores it through Ptr. Type can be either i16 or
  /// i32.
  FIRST_MEMORY_OPCODE,
  STBRX = FIRST_MEMORY_OPCODE,

  /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
  /// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
  /// then puts it in the bottom bits of the GPRC. TYPE can be either i16
  /// or i32.

  /// STFIWX - The STFIWX instruction. The first operand is an input token
  /// chain, then an f64 value to store, then an address to store it to.

  /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
  /// load which sign-extends from a 32-bit integer value into the
  /// destination 64-bit register.

  /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
  /// load which zero-extends from a 32-bit integer value into the
  /// destination 64-bit register.

  /// GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an
  /// integer smaller than 64 bits into a VSR. The integer is zero-extended.
  /// This can be used for converting loaded integers to floating point.

  /// STXSIX - The STXSI[bh]X instruction. The first operand is an input
  /// chain, then an f64 value to store, then an address to store it to,
  /// followed by a byte-width for the store.

  /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
  /// Maps directly to an lxvd2x instruction that will be followed by
  /// an xxswapd.

  /// LXVRZX - Load VSX Vector Rightmost and Zero Extend
  /// This node represents v1i128 BUILD_VECTOR of a zero extending load
  /// instruction from <byte, halfword, word, or doubleword> to i128.
  /// Allows utilization of the Load VSX Vector Rightmost Instructions.

  /// VSRC, CHAIN = LOAD_VEC_BE CHAIN, Ptr - Occurs only for little endian.
  /// Maps directly to one of lxvd2x/lxvw4x/lxvh8x/lxvb16x depending on
  /// the vector type to load vector in big-endian element order.

  /// VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a
  /// v2f32 value into the lower half of a VSR register.

  /// VSRC, CHAIN = LD_SPLAT, CHAIN, Ptr - a splatting load memory
  /// instruction such as LXVDSX, LXVWSX.

  /// VSRC, CHAIN = ZEXT_LD_SPLAT, CHAIN, Ptr - a splatting load memory
  /// that zero-extends.

  /// VSRC, CHAIN = SEXT_LD_SPLAT, CHAIN, Ptr - a splatting load memory
  /// that sign-extends.

  /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
  /// Maps directly to an stxvd2x instruction that will be preceded by
  /// an xxswapd.

  /// CHAIN = STORE_VEC_BE CHAIN, VSRC, Ptr - Occurs only for little endian.
  /// Maps directly to one of stxvd2x/stxvw4x/stxvh8x/stxvb16x depending on
  /// the vector type to store vector in big-endian element order.

  /// Store scalar integers from VSR.

  /// ATOMIC_CMP_SWAP - the exact same as the target-independent nodes
  /// except they ensure that the compare input is zero-extended for
  /// sub-word versions because the atomic loads zero-extend.

  /// CHAIN,Glue = STORE_COND CHAIN, GPR, Ptr
  /// The store conditional instruction ST[BHWD]ARX that produces a glue
  /// result to attach it to a conditional branch.

  /// GPRC = TOC_ENTRY GA, TOC
  /// Loads the entry for GA from the TOC, where the TOC base is given by
  /// the last operand.
  TOC_ENTRY,

  LAST_MEMORY_OPCODE = TOC_ENTRY,
};

} // end namespace PPCISD

/// Define some predicates that are used for node matching.
namespace PPC {

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                          SelectionDAG &DAG);

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                          SelectionDAG &DAG);

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction.
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                          SelectionDAG &DAG);

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                        unsigned ShuffleKind, SelectionDAG &DAG);

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                        unsigned ShuffleKind, SelectionDAG &DAG);

/// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGEW or VMRGOW instruction.
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                         unsigned ShuffleKind, SelectionDAG &DAG);

/// isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable
/// for a XXSLDWI instruction.
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          bool &Swap, bool IsLE);

/// isXXBRHShuffleMask - Return true if this is a shuffle mask suitable
/// for a XXBRH instruction.
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N);

/// isXXBRWShuffleMask - Return true if this is a shuffle mask suitable
/// for a XXBRW instruction.
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N);

/// isXXBRDShuffleMask - Return true if this is a shuffle mask suitable
/// for a XXBRD instruction.
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N);

/// isXXBRQShuffleMask - Return true if this is a shuffle mask suitable
/// for a XXBRQ instruction.
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N);

/// isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable
/// for a XXPERMDI instruction.
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                           bool &Swap, bool IsLE);

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
/// shift amount, otherwise return -1.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                        SelectionDAG &DAG);

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);

/// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
/// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
/// shuffle of v4f32/v4i32 vectors that just inserts one element from one
/// vector into the other. This function will also set a couple of
/// output parameters for how much the source vector needs to be shifted and
/// what byte number needs to be specified for the instruction to put the
/// element in the desired location of the target vector.
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                     unsigned &InsertAtByte, bool &Swap, bool IsLE);

/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
/// appropriate for PPC mnemonics (which have a big endian bias - namely
/// elements are counted from the left of the vector register).
unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                    SelectionDAG &DAG);

/// get_VSPLTI_elt - If this is a build_vector of constants which can be
/// formed by using a vspltis[bhw] instruction of the specified element
/// size, return the constant being splatted. The ByteSize field indicates
/// the number of bytes of each element [124] -> [bhw].
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
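
// Illustrative sketch (not part of the original header): how a caller might
// use get_VSPLTI_elt to check whether a BUILD_VECTOR can be emitted as a
// single vspltisb/vspltish/vspltisw. `BV` and `DAG` are assumed to be a
// build_vector node and the current SelectionDAG.
// \code
//   if (SDValue Splat = PPC::get_VSPLTI_elt(BV, /*ByteSize=*/2, DAG)) {
//     // The constant fits in the signed 5-bit immediate splatted over i16
//     // elements, so a single vspltish can materialize the whole vector.
//     int64_t Imm = cast<ConstantSDNode>(Splat)->getSExtValue();
//     (void)Imm;
//   }
// \endcode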

// Flags for computing the optimal addressing mode for loads and stores.
enum MemOpFlags {
  // Extension mode for integer loads.

  // Address computation flags.
  MOF_NotAddNorCst = 1 << 5,      // Not const. or sum of ptr and scalar.
  MOF_RPlusSImm16 = 1 << 6,       // Reg plus signed 16-bit constant.
  MOF_RPlusLo = 1 << 7,           // Reg plus signed 16-bit relocation.
  MOF_RPlusSImm16Mult4 = 1 << 8,  // Reg plus 16-bit signed multiple of 4.
  MOF_RPlusSImm16Mult16 = 1 << 9, // Reg plus 16-bit signed multiple of 16.
  MOF_RPlusSImm34 = 1 << 10,      // Reg plus 34-bit signed constant.
  MOF_RPlusR = 1 << 11,           // Sum of two variables.
  MOF_PCRel = 1 << 12,            // PC-Relative relocation.
  MOF_AddrIsSImm32 = 1 << 13,     // A simple 32-bit constant.

  // The in-memory type.
  MOF_SubWordInt = 1 << 15,
  MOF_WordInt = 1 << 16,
  MOF_DoubleWordInt = 1 << 17,
  MOF_ScalarFloat = 1 << 18, // Scalar single or double precision.
  MOF_Vector = 1 << 19,      // Vector types and quad precision scalars.
  MOF_Vector256 = 1 << 20,

  // Subtarget features.
  MOF_SubtargetBeforeP9 = 1 << 22,
  MOF_SubtargetP9 = 1 << 23,
  MOF_SubtargetP10 = 1 << 24,
  MOF_SubtargetSPE = 1 << 25
};
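
// Illustrative sketch (not from the original header): the flags above are
// OR'ed together to summarize one memory access, and the combined mask is
// then looked up (via getAddrModeForFlags/AddrModesMap below) to pick an
// addressing mode. For example, a 32-bit integer load whose address is a
// register plus a signed 16-bit offset on a Power9 subtarget could be
// described as:
// \code
//   unsigned Flags = PPC::MOF_WordInt | PPC::MOF_RPlusSImm16 |
//                    PPC::MOF_SubtargetP9;
//   // The selected mode would then be the base-register-plus-16-bit
//   // displacement (D-form) style of address.
// \endcode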

// The addressing modes for loads and stores.

} // end namespace PPC

class PPCTargetLowering : public TargetLowering {
  const PPCSubtarget &Subtarget;

public:
  explicit PPCTargetLowering(const PPCTargetMachine &TM,
                             const PPCSubtarget &STI);

  /// getTargetNodeName() - This method returns the name of a target specific
  /// DAG node.
  const char *getTargetNodeName(unsigned Opcode) const override;

  bool isSelectSupported(SelectSupportKind Kind) const override {
    // PowerPC does not support scalar condition selects on vectors.
    return (Kind != SelectSupportKind::ScalarCondVectorVal);
  }

  /// getPreferredVectorAction - The code we generate when vector types are
  /// legalized by promoting the integer element type is often much worse
  /// than code we generate if we widen the type for applicable vector types.
  /// The issue with promoting is that the vector is scalarized, individual
  /// elements promoted and then the vector is rebuilt. So say we load a pair
  /// of v4i8's and shuffle them. This will turn into a mess of 8 extending
  /// loads, moves back into VSR's (or memory ops if we don't have moves) and
  /// then the VPERM for the shuffle. All in all a very slow sequence.
  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
      const override {
    // Default handling for scalable and single-element vectors.
    if (VT.isScalableVector() || VT.getVectorNumElements() == 1)
      return TargetLoweringBase::getPreferredVectorAction(VT);

    // Split and promote vNi1 vectors so we don't produce v256i1/v512i1
    // types as those are only for MMA instructions.
    if (VT.getScalarSizeInBits() == 1 && VT.getSizeInBits() > 16)
      return TypeSplitVector;
    if (VT.getScalarSizeInBits() == 1)
      return TypePromoteInteger;

    // Widen vectors that have reasonably sized elements.
    if (VT.getScalarSizeInBits() % 8 == 0)
      return TypeWidenVector;
    return TargetLoweringBase::getPreferredVectorAction(VT);
  }

  bool useSoftFloat() const override;

  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }

  bool isCheapToSpeculateCttz(Type *Ty) const override {
    return true;
  }

  bool isCheapToSpeculateCtlz(Type *Ty) const override {
    return true;
  }

  bool
  shallExtractConstSplatVectorElementToStore(Type *VectorTy,
                                             unsigned ElemSizeInBits,
                                             unsigned &Index) const override;

  bool isCtlzFast() const override {
    return true;
  }

  bool isEqualityCmpFoldedWithSignedCmp() const override {
    return false;
  }

  bool hasAndNotCompare(SDValue) const override {
    return true;
  }

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }

  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
                               bool OptForSize, NegatibleCost &Cost,
                               unsigned Depth = 0) const override;

  /// getSetCCResultType - Return the ISD::SETCC ValueType
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                 SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;

  /// SelectAddressEVXRegReg - Given the specified address, check to see if
  /// it can be more efficiently represented as [r+imm].
  bool SelectAddressEVXRegReg(SDValue N, SDValue &Base, SDValue &Index,
                              SelectionDAG &DAG) const;

  /// SelectAddressRegReg - Given the specified address, check to see if it
  /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment
  /// is non-zero, only accept displacement which is not suitable for [r+imm].
  /// Returns false if it can be represented by [r+imm], which are preferred.
  bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                           SelectionDAG &DAG,
                           MaybeAlign EncodingAlignment = std::nullopt) const;

  /// SelectAddressRegImm - Returns true if the address N can be represented
  /// by a base register plus a signed 16-bit displacement [r+imm], and if it
  /// is not better represented as reg+reg. If \p EncodingAlignment is
  /// non-zero, only accept displacements suitable for instruction encoding
  /// requirement, i.e. multiples of 4 for DS form.
  bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                           SelectionDAG &DAG,
                           MaybeAlign EncodingAlignment) const;
  bool SelectAddressRegImm34(SDValue N, SDValue &Disp, SDValue &Base,
                             SelectionDAG &DAG) const;

  /// SelectAddressRegRegOnly - Given the specified address, force it to be
  /// represented as an indexed [r+r] operation.
  bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                               SelectionDAG &DAG) const;

  /// SelectAddressPCRel - Represent the specified address as pc relative to
  /// be represented as [pc+imm].
  bool SelectAddressPCRel(SDValue N, SDValue &Base) const;

  Sched::Preference getSchedulingPreference(SDNode *N) const override;

  /// LowerOperation - Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  /// ReplaceNodeResults - Replace the results of node with an illegal result
  /// type with new values built out of custom code.
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  Align getPrefLoopAlignment(MachineLoop *ML) const override;

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return true;
  }

  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  bool shouldInlineQuadwordAtomics() const;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
                                      AtomicRMWInst *AI, Value *AlignedAddr,
                                      Value *Incr, Value *Mask,
                                      Value *ShiftAmt,
                                      AtomicOrdering Ord) const override;
  Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,
                                          AtomicCmpXchgInst *CI,
                                          Value *AlignedAddr, Value *CmpVal,
                                          Value *NewVal, Value *Mask,
                                          AtomicOrdering Ord) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;
  MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
                                      MachineBasicBlock *MBB,
                                      unsigned AtomicSize,
                                      unsigned BinOpcode,
                                      unsigned CmpOpcode = 0,
                                      unsigned CmpPred = 0) const;
  MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
                                              MachineBasicBlock *MBB,
                                              bool is8bit,
                                              unsigned Opcode,
                                              unsigned CmpOpcode = 0,
                                              unsigned CmpPred = 0) const;

  MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;
  MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const;
  MachineBasicBlock *emitProbedAlloca(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  bool hasInlineStackProbe(const MachineFunction &MF) const override;

  unsigned getStackProbeSize(const MachineFunction &MF) const;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
  /// function arguments in the caller parameter area.
  Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override;

  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector. If it is invalid, don't add anything to Ops.
  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "es")
      return InlineAsm::ConstraintCode::es;
    else if (ConstraintCode == "Q")
      return InlineAsm::ConstraintCode::Q;
    else if (ConstraintCode == "Z")
      return InlineAsm::ConstraintCode::Z;
    else if (ConstraintCode == "Zy")
      return InlineAsm::ConstraintCode::Zy;
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  void CollectTargetIntrinsicOperands(const CallInst &I,
                                      SmallVectorImpl<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;

  /// isLegalICmpImmediate - Return true if the specified immediate is a legal
  /// icmp immediate, that is, the target has icmp instructions which can
  /// compare a register against the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  /// isLegalAddImmediate - Return true if the specified immediate is a legal
  /// add immediate, that is, the target has add instructions which can
  /// add a register and the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const override;

  /// isTruncateFree - Return true if it's free to truncate a value of
  /// type Ty1 to type Ty2. e.g. On PPC it's free to truncate an i64 value in
  /// register X1 to i32 by referencing its sub-register R1.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool isFPExtFree(EVT DestVT, EVT SrcVT) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  bool convertSelectOfConstantsToMath(EVT VT) const override {
    return true;
  }

  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
                              SDValue C) const override;

  bool isDesirableToTransformToIntegerOp(unsigned Opc,
                                         EVT VT) const override {
    // Only handle float load/store pair because float(fpr) load/store
    // instruction has more cycles than integer(gpr) load/store in PPC.
    if (Opc != ISD::LOAD && Opc != ISD::STORE)
      return false;
    if (VT != MVT::f32 && VT != MVT::f64)
      return false;

    return true;
  }

  // Returns true if the address of the global is stored in a TOC entry.
  bool isAccessedAsGotIndirect(SDValue N) const;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                          const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  /// It returns EVT::Other if the type should be determined using generic
  /// target-independent logic.
  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  /// Is unaligned memory access allowed for the given type, and is it fast
  /// relative to software emulation.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const override;

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true, otherwise fmuladd is
  /// expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

  /// isProfitableToHoist - Check if it is profitable to hoist instruction
  /// \p I to its dominator block.
  /// For example, it is not profitable if \p I and its only user can form a
  /// FMA instruction, because PowerPC prefers FMADD.
  bool isProfitableToHoist(Instruction *I) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  // Should we expand the build vector with shuffles?
  bool
  shouldExpandBuildVectorWithShuffles(EVT VT,
                                      unsigned DefinedValues) const override;

  // Keep the zero-extensions for arguments to libcalls.
  bool shouldKeepZExtForFP16Conv() const override { return true; }

  /// createFastISel - This method returns a target-specific FastISel object,
  /// or null if the target does not support "fast" instruction selection.
  FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                           const TargetLibraryInfo *LibInfo) const override;

  /// Returns true if an argument of type Ty needs to be passed in a
  /// contiguous block of registers in calling convention CallConv.
  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override {
    // We support any array type as "consecutive" block in the parameter
    // save area. The element type defines the alignment requirement and
    // whether the argument should go in GPRs, FPRs, or VRs if available.
    //
    // Note that clang uses this capability both to implement the ELFv2
    // homogeneous float/vector aggregate ABI, and to avoid having to use
    // "byval" when passing aggregates that might fully fit in registers.
    return Ty->isArrayTy();
  }
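
  // Illustrative sketch (not from the original header): under the ELFv2 ABI
  // a homogeneous aggregate such as
  // \code
  //   struct Quad { double a, b, c, d; };  // coerced by clang to [4 x double]
  // \endcode
  // is passed as an array type, so the check above returns true and the four
  // doubles are assigned to consecutive FPRs when enough are available.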

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  /// Override to support customized stack guard loading.
  bool useLoadStackGuardNode(const Module &M) const override;
  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  unsigned getJumpTableEncoding() const override;
  bool isJumpTableRelative() const override;
  SDValue getPICJumpTableRelocBase(SDValue Table,
                                   SelectionDAG &DAG) const override;
  const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const override;

  /// SelectOptimalAddrMode - Based on a node N and its Parent (a MemSDNode),
  /// compute the address flags of the node, get the optimal address mode
  /// based on the flags, and set the Base and Disp based on the address mode.
  PPC::AddrMode SelectOptimalAddrMode(const SDNode *Parent, SDValue N,
                                      SDValue &Disp, SDValue &Base,
                                      SelectionDAG &DAG,
                                      MaybeAlign Align) const;

  /// SelectForceXFormMode - Given the specified address, force it to be
  /// represented as an indexed [r+r] operation (an XForm instruction).
  PPC::AddrMode SelectForceXFormMode(SDValue N, SDValue &Disp, SDValue &Base,
                                     SelectionDAG &DAG) const;

  bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
      const override;

  /// Structure that collects some common arguments that get passed around
  /// between the functions for call lowering.
  struct CallFlags {
    const CallingConv::ID CallConv;
    const bool IsTailCall : 1;
    const bool IsVarArg : 1;
    const bool IsPatchPoint : 1;
    const bool IsIndirect : 1;
    const bool HasNest : 1;
    const bool NoMerge : 1;

    CallFlags(CallingConv::ID CC, bool IsTailCall, bool IsVarArg,
              bool IsPatchPoint, bool IsIndirect, bool HasNest, bool NoMerge)
        : CallConv(CC), IsTailCall(IsTailCall), IsVarArg(IsVarArg),
          IsPatchPoint(IsPatchPoint), IsIndirect(IsIndirect),
          HasNest(HasNest), NoMerge(NoMerge) {}
  };

  CCAssignFn *ccAssignFnForCall(CallingConv::ID CC, bool Return,
                                bool IsVarArg) const;
  bool supportsTailCallFor(const CallBase *CB) const;

private:
  struct ReuseLoadInfo {
    MachinePointerInfo MPI;
    bool IsDereferenceable = false;
    bool IsInvariant = false;
    const MDNode *Ranges = nullptr;

    ReuseLoadInfo() = default;

    MachineMemOperand::Flags MMOFlags() const {
      MachineMemOperand::Flags F = MachineMemOperand::MONone;
      if (IsDereferenceable)
        F |= MachineMemOperand::MODereferenceable;
      if (IsInvariant)
        F |= MachineMemOperand::MOInvariant;
      return F;
    }
  };

  // Map that relates a set of common address flags to PPC addressing modes.
  std::map<PPC::AddrMode, SmallVector<unsigned, 16>> AddrModesMap;
  void initializeAddrModeMap();

  bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
                           SelectionDAG &DAG,
                           ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
  void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
                       SelectionDAG &DAG) const;

  void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                              SelectionDAG &DAG, const SDLoc &dl) const;
  SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
                                   const SDLoc &dl) const;

  bool directMoveIsProfitable(const SDValue &Op) const;
  SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
                                   const SDLoc &dl) const;

  SDValue LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
                               const SDLoc &dl) const;

  SDValue LowerTRUNCATEVector(SDValue Op, SelectionDAG &DAG) const;

  SDValue getFramePointerFrameIndex(SelectionDAG &DAG) const;
  SDValue getReturnAddrFrameIndex(SelectionDAG &DAG) const;

  bool IsEligibleForTailCallOptimization(
      const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
      CallingConv::ID CallerCC, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins) const;

  bool IsEligibleForTailCallOptimization_64SVR4(
      const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
      CallingConv::ID CallerCC, const CallBase *CB, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<ISD::InputArg> &Ins, const Function *CallerFunc,
      bool isCalleeExternalSymbol) const;

  bool isEligibleForTCO(const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
                        CallingConv::ID CallerCC, const CallBase *CB,
                        bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        const Function *CallerFunc,
                        bool isCalleeExternalSymbol) const;

  SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
                                       SDValue Chain, SDValue &LROpOut,
                                       SDValue &FPOpOut,
                                       const SDLoc &dl) const;

  SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, SDValue GA) const;

  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddressAIX(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddressLinux(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUaddo(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSSUBO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                         const SDLoc &dl) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFunnelShift(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVPERM(SDValue Op, SelectionDAG &DAG, ArrayRef<int> PermMask,
                     EVT VT, SDValue V1, SDValue V2) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerToLibCall(const char *LibCallName, SDValue Op,
                         SelectionDAG &DAG) const;
  SDValue lowerLibCallBasedOnType(const char *LibCallFloatName,
                                  const char *LibCallDoubleName, SDValue Op,
                                  SelectionDAG &DAG) const;
  bool isLowringToMASSFiniteSafe(SDValue Op) const;
  bool isLowringToMASSSafe(SDValue Op) const;
  bool isScalarMASSConversionEnabled() const;
  SDValue lowerLibCallBase(const char *LibCallDoubleName,
                           const char *LibCallFloatName,
                           const char *LibCallDoubleNameFinite,
                           const char *LibCallFloatNameFinite, SDValue Op,
                           SelectionDAG &DAG) const;
  SDValue lowerPow(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSin(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCos(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerLog(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerLog10(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerExp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals) const;

  SDValue FinishCall(CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
                     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                     SDValue InGlue, SDValue Chain, SDValue CallSeqStart,
                     SDValue &Callee, int SPDiff, unsigned NumBytes,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SmallVectorImpl<SDValue> &InVals,
                     const CallBase *CB) const;

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
                            SelectionDAG &DAG, SDValue ArgVal,
                            const SDLoc &dl) const;

  SDValue LowerFormalArguments_AIX(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
  SDValue LowerFormalArguments_64SVR4(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
  SDValue LowerFormalArguments_32SVR4(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;

  SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                     SDValue CallSeqStart,
                                     ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                                     const SDLoc &dl) const;

  SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                           const SmallVectorImpl<SDValue> &OutVals,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           const SDLoc &dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals,
                           const CallBase *CB) const;
  SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                           const SmallVectorImpl<SDValue> &OutVals,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           const SDLoc &dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals,
                           const CallBase *CB) const;
  SDValue LowerCall_AIX(SDValue Chain, SDValue Callee, CallFlags CFlags,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        const SDLoc &dl, SelectionDAG &DAG,
                        SmallVectorImpl<SDValue> &InVals,
                        const CallBase *CB) const;

  SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;

  SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineMUL(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFMALike(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineVectorShuffle(ShuffleVectorSDNode *SVN,
                               SelectionDAG &DAG) const;
  SDValue combineVReverseMemOP(ShuffleVectorSDNode *SVN, LSBaseSDNode *LSBase,
                               DAGCombinerInfo &DCI) const;

  /// ConvertSETCCToSubtract - looks at SETCC that compares ints. It replaces
  /// SETCC with integer subtraction when (1) there is a legal way of doing it
  /// and (2) keeping the result of the comparison in a GPR has a performance
  /// benefit.
  SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &RefinementSteps, bool &UseOneConstNR,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &RefinementSteps) const override;
  SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                           const DenormalMode &Mode) const override;
  SDValue getSqrtResultForDenormInput(SDValue Operand,
                                      SelectionDAG &DAG) const override;
  unsigned combineRepeatedFPDivisors() const override;

  SDValue
  combineElementTruncationToVectorTruncation(SDNode *N,
                                             DAGCombinerInfo &DCI) const;

  /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be
  /// handled by the VINSERTH instruction introduced in ISA 3.0. This is
  /// essentially any shuffle of v8i16 vectors that just inserts one element
  /// from one vector into the other.
  SDValue lowerToVINSERTH(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

  /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be
  /// handled by the VINSERTB instruction introduced in ISA 3.0. This is
  /// essentially v16i8 vector version of VINSERTH.
  SDValue lowerToVINSERTB(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

  /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
  /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1.
  SDValue lowerToXXSPLTI32DX(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

  // Return whether the call instruction can potentially be optimized to a
  // tail call. This will cause the optimizers to attempt to move, or
  // duplicate return instructions to help enable tail call optimizations.
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  /// getAddrModeForFlags - Based on the set of address flags, select the most
  /// optimal instruction format to match by.
  PPC::AddrMode getAddrModeForFlags(unsigned Flags) const;

  /// computeMOFlags - Given a node N and its Parent (a MemSDNode), compute
  /// the address flags of the load/store instruction that is to be matched.
  /// The address flags are stored in a map, which is then searched
  /// through to determine the optimal load/store instruction format.
  unsigned computeMOFlags(const SDNode *Parent, SDValue N,
                          SelectionDAG &DAG) const;
}; // end class PPCTargetLowering

namespace PPC {

FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                         const TargetLibraryInfo *LibInfo);

} // end namespace PPC

bool isIntS16Immediate(SDNode *N, int16_t &Imm);
bool isIntS16Immediate(SDValue Op, int16_t &Imm);
bool isIntS34Immediate(SDNode *N, int64_t &Imm);
bool isIntS34Immediate(SDValue Op, int64_t &Imm);

bool convertToNonDenormSingle(APInt &ArgAPInt);
bool convertToNonDenormSingle(APFloat &ArgAPFloat);
bool checkConvertToNonDenormSingle(APFloat &ArgAPFloat);

} // end namespace llvm

#endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H