//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
#define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H

#include "PPCInstrInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/MachineValueType.h"
#include <utility>

namespace llvm {

namespace PPCISD {

// When adding a NEW PPCISD node please add it to the correct position in
// the enum. The order of elements in this enum matters!
// Values that are added after this entry:
//   STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE
// are considered memory opcodes and are treated differently than entries
// that come before it. For example, ADD or MUL should be placed before
// ISD::FIRST_TARGET_MEMORY_OPCODE, while a LOAD or STORE should come
// after it.
enum NodeType : unsigned {
  // Start the numbering where the builtin ops and target ops leave off.
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  /// FSEL - Traditional three-operand fsel node.
  ///
  FSEL,

  /// FCFID - The FCFID instruction, taking an f64 operand and producing
  /// an f64 value containing the FP representation of the integer that
  /// was temporarily in the f64 operand.
  FCFID,

  /// Newer FCFID[US] integer-to-floating-point conversion instructions for
  /// unsigned integers and single-precision outputs.
  FCFIDU, FCFIDS, FCFIDUS,

  /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
  /// operand, producing an f64 value containing the integer representation
  /// of that FP value.
  FCTIDZ, FCTIWZ,

  /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for
  /// unsigned integers with round toward zero.
  FCTIDUZ, FCTIWUZ,

  /// Floating-point-to-integer conversion instructions.
  FP_TO_UINT_IN_VSR, FP_TO_SINT_IN_VSR,

  /// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in
  /// VSFRC that is sign-extended from ByteWidth to a 64-bit integer.
  VEXTS,

  /// SExtVElems, takes an input vector of a smaller type and sign
  /// extends to an output vector of a larger type.
  SExtVElems,

  /// Reciprocal estimate instructions (unary FP ops).
  FRE, FRSQRTE,

  // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
  // three v4f32 operands and producing a v4f32 result.
  VMADDFP, VNMSUBFP,

  /// VPERM - The PPC VPERM Instruction.
  ///
  VPERM,

  /// XXSPLT - The PPC VSX splat instructions
  ///
  XXSPLT,

  /// VECINSERT - The PPC vector insert instruction
  ///
  VECINSERT,

  /// XXREVERSE - The PPC VSX reverse instruction
  ///
  XXREVERSE,

  /// VECSHL - The PPC vector shift left instruction
  ///
  VECSHL,

  /// XXPERMDI - The PPC XXPERMDI instruction
  ///
  XXPERMDI,

  /// The CMPB instruction (takes two operands of i32 or i64).
  CMPB,

  /// Hi/Lo - These represent the high and low 16-bit parts of a global
  /// address respectively. These nodes have two operands, the first of
  /// which must be a TargetGlobalAddress, and the second of which must be a
  /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
  /// though these are usually folded into other nodes.
  Hi, Lo,

  /// The following two target-specific nodes are used for calls through
  /// function pointers in the 64-bit SVR4 ABI.

  /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
  /// compute an allocation on the stack.
  DYNALLOC,

  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
  /// compute an offset from native SP to the address of the most recent
  /// dynamic alloca.
  DYNAREAOFFSET,

  /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
  /// at function entry, used for PIC code.
  GlobalBaseReg,

  /// These nodes represent PPC shifts.
  ///
  /// For scalar types, only the last `n + 1` bits of the shift amounts
  /// are used, where n is log2(sizeof(element) * 8). See sld/slw, etc.
  /// for exact behaviors.
  ///
  /// For vector types, only the last n bits are used. See vsld.
  SRL, SRA, SHL,
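
  // For illustration: for an i32 shift, n is log2(32) = 5, so only the low
  // 6 bits of the amount participate; e.g., slw produces 0 for any shift
  // amount in [32, 63].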

  /// EXTSWSLI = The PPC extswsli instruction, which does an extend-sign
  /// word and shift left immediate.
  EXTSWSLI,

  /// The combination of sra[wd]i and addze used to implement signed
  /// integer division by a power of 2. The first operand is the dividend,
  /// and the second is the constant shift amount (representing the
  /// divisor).
  SRA_ADDZE,
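
  // For illustration, (sdiv i32 %x, 4) can be selected as the sketch below;
  // srawi sets CA when the dividend is negative and nonzero bits are
  // shifted out, and addze then rounds the quotient toward zero:
  //   srawi r3, r3, 2
  //   addze r3, r3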

  /// CALL - A direct function call.
  /// CALL_NOP is a call with the special NOP which follows 64-bit
  /// SVR4 calls and 32-bit/64-bit AIX calls.
  CALL, CALL_NOP,

  /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
  /// MTCTR instruction.
  MTCTR,

  /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
  /// BCTRL instruction.
  BCTRL,

  /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
  /// instruction and the TOC reload required on SVR4 PPC64.
  BCTRL_LOAD_TOC,

  /// Return with a flag operand, matched by 'blr'.
  RET_FLAG,

  /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
  /// This copies the bits corresponding to the specified CRREG into the
  /// resultant GPR. Bits corresponding to other CR regs are undefined.
  MFOCRF,

  /// Direct move from a VSX register to a GPR.
  MFVSR,

  /// Direct move from a GPR to a VSX register (algebraic).
  MTVSRA,

  /// Direct move from a GPR to a VSX register (zero).
  MTVSRZ,

  /// Direct move of 2 consecutive GPRs to a VSX register.
  BUILD_FP128,

  /// BUILD_SPE64 and EXTRACT_SPE are analogous to BUILD_PAIR and
  /// EXTRACT_ELEMENT but take f64 arguments instead of i64, as i64 is
  /// unsupported for this target.
  /// Merge 2 GPRs to a single SPE register.
  BUILD_SPE64,

  /// Extract SPE register component, second argument is high or low.
  EXTRACT_SPE,

  /// Extract a subvector from a signed integer vector and convert to FP.
  /// It is primarily used to convert a (widened) illegal integer vector
  /// type to a legal floating-point vector type.
  /// For example v2i32 -> widened to v4i32 -> v2f64
  SINT_VEC_TO_FP,

  /// Extract a subvector from an unsigned integer vector and convert to FP.
  /// As with SINT_VEC_TO_FP, used for converting illegal types.
  UINT_VEC_TO_FP,

  // FIXME: Remove these once the ANDI glue bug is fixed:
  /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
  /// eq or gt bit of CR0 after executing andi. x, 1. This is used to
  /// implement truncation of i32 or i64 to i1.
  ANDIo_1_EQ_BIT, ANDIo_1_GT_BIT,
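
  // For illustration, (trunc i32 %x to i1) becomes "andi. r, r, 1"
  // followed by a copy out of CR0's EQ or GT bit.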

  // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
  // target (returns (Lo, Hi)). It takes a chain operand.
  READ_TIME_BASE,

  // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
  EH_SJLJ_SETJMP,

  // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
  EH_SJLJ_LONGJMP,

  /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
  /// instructions. For lack of a better number, we use the opcode number
  /// encoding for the OPC field to identify the compare. For example, 838
  /// is VCMPGTSH.
  VCMP,

  /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
  /// altivec VCMP*o instructions. For lack of a better number, we use the
  /// opcode number encoding for the OPC field to identify the compare. For
  /// example, 838 is VCMPGTSH.
  VCMPo,

  /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
  /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
  /// condition register to branch on, OPC is the branch opcode to use (e.g.
  /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
  /// an optional input flag argument.
  COND_BRANCH,

  /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
  /// loops.
  BDNZ, BDZ,

  /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
  /// towards zero. Used only as part of the long double-to-int
  /// conversion sequence.
  FADDRTZ,

  /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
  MFFS,

  /// TC_RETURN - A tail call return.
  ///   operand #0 chain
  ///   operand #1 callee (register or absolute)
  ///   operand #2 stack adjustment
  ///   operand #3 optional in flag
  TC_RETURN,

  /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls.
  CR6SET,
  CR6UNSET,

  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
  /// for non-position-independent code on PPC32.
  PPC32_GOT,

  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general-dynamic and
  /// local-dynamic TLS and position-independent code on PPC32.
  PPC32_PICGOT,

  /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec
  /// TLS model, produces an ADDIS8 instruction that adds the GOT
  /// base to sym\@got\@tprel\@ha.
  ADDIS_GOT_TPREL_HA,

  /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
  /// TLS model, produces a LD instruction with base register G8RReg
  /// and offset sym\@got\@tprel\@l. This completes the addition that
  /// finds the offset of "sym" relative to the thread pointer.
  LD_GOT_TPREL_L,

  /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
  /// model, produces an ADD instruction that adds the contents of
  /// G8RReg to the thread pointer. Symbol contains a relocation
  /// sym\@tls which is to be replaced by the thread pointer and
  /// identifies to the linker that the instruction is part of a
  /// TLS sequence.
  ADD_TLS,
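
  // Taken together, the three initial-exec nodes above typically select to
  // a sequence like the following sketch (register choices illustrative):
  //   addis 3, 2, sym@got@tprel@ha
  //   ld    3, sym@got@tprel@l(3)
  //   add   3, 3, sym@tls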

  /// G8RC = ADDIS_TLSGD_HA %x2, Symbol - For the general-dynamic TLS
  /// model, produces an ADDIS8 instruction that adds the GOT base
  /// register to sym\@got\@tlsgd\@ha.
  ADDIS_TLSGD_HA,

  /// %x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
  /// model, produces an ADDI8 instruction that adds G8RReg to
  /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
  /// ADDIS_TLSGD_L_ADDR until after register assignment.
  ADDI_TLSGD_L,

  /// %x3 = GET_TLS_ADDR %x3, Symbol - For the general-dynamic TLS
  /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
  /// ADDIS_TLSGD_L_ADDR until after register assignment.
  GET_TLS_ADDR,

  /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
  /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
  /// register assignment.
  ADDI_TLSGD_L_ADDR,
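
  // A sketch of the general-dynamic code these nodes expand to (registers
  // illustrative):
  //   addis 3, 2, sym@got@tlsgd@ha
  //   addi  3, 3, sym@got@tlsgd@l
  //   bl    __tls_get_addr(sym@tlsgd)
  //   nop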

  /// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS
  /// model, produces an ADDIS8 instruction that adds the GOT base
  /// register to sym\@got\@tlsld\@ha.
  ADDIS_TLSLD_HA,

  /// %x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
  /// model, produces an ADDI8 instruction that adds G8RReg to
  /// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
  /// ADDIS_TLSLD_L_ADDR until after register assignment.
  ADDI_TLSLD_L,

  /// %x3 = GET_TLSLD_ADDR %x3, Symbol - For the local-dynamic TLS
  /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
  /// ADDIS_TLSLD_L_ADDR until after register assignment.
  GET_TLSLD_ADDR,

  /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
  /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
  /// following register assignment.
  ADDI_TLSLD_L_ADDR,

  /// G8RC = ADDIS_DTPREL_HA %x3, Symbol - For the local-dynamic TLS
  /// model, produces an ADDIS8 instruction that adds X3 to
  /// sym\@dtprel\@ha.
  ADDIS_DTPREL_HA,

  /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
  /// model, produces an ADDI8 instruction that adds G8RReg to
  /// sym\@dtprel\@l.
  ADDI_DTPREL_L,

  /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
  /// during instruction selection to optimize a BUILD_VECTOR into
  /// operations on splats. This is necessary to avoid losing these
  /// optimizations due to constant folding.
  VADD_SPLAT,

  /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
  /// operand identifies the operating system entry point.
  SC,

  /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
  CLRBHRB,

  /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
  /// history rolling buffer entry.
  MFBHRBE,

  /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.
  RFEBB,

  /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
  /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
  /// or stxvd2x instruction. The chain is necessary because the
  /// sequence replaces a load and needs to provide the same number
  /// of outputs.
  XXSWAPD,

  /// An SDNode for swaps that are not associated with any loads/stores
  /// and thereby have no chain.
  SWAP_NO_CHAIN,

  /// An SDNode for Power9 vector absolute value difference.
  ///   operand #0 vector
  ///   operand #1 vector
  ///   operand #2 constant i32 0 or 1, to indicate whether we need to patch
  ///   the most significant bit for signed i32
  ///
  /// Power9 VABSD* instructions are designed to support unsigned integer
  /// vectors (byte/halfword/word); if we want to use them for signed
  /// integer vectors, we have to flip their sign bits first. Flipping the
  /// sign bit of a byte/halfword integer vector would be inefficient, but
  /// for a word integer vector we can leverage XVNEGSP to do it
  /// efficiently, e.g.:
  ///   abs(sub(a,b)) => VABSDUW(a+0x80000000, b+0x80000000)
  ///                 => VABSDUW((XVNEGSP a), (XVNEGSP b))
  VABSD,

  /// QVFPERM = This corresponds to the QPX qvfperm instruction.
  QVFPERM,

  /// QVGPCI = This corresponds to the QPX qvgpci instruction.
  QVGPCI,

  /// QVALIGNI = This corresponds to the QPX qvaligni instruction.
  QVALIGNI,

  /// QVESPLATI = This corresponds to the QPX qvesplati instruction.
  QVESPLATI,

  /// QBFLT = Access the underlying QPX floating-point boolean
  /// representation.
  QBFLT,

  /// Custom extend v4f32 to v2f64.
  FP_EXTEND_LH,

  /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
  /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
  /// the GPRC input, then stores it through Ptr. Type can be either i16 or
  /// i32.
  STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE,
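
  // For example, a bswap of an i32 feeding a store is typically selected
  // to a single stwbrx; the i16 form maps to sthbrx.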

  /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
  /// byte-swapping load instruction. It loads "Type" bits, byte-swaps them,
  /// then puts them in the bottom bits of the GPRC. TYPE can be either i16
  /// or i32.
  LBRX,

  /// STFIWX - The STFIWX instruction. The first operand is an input token
  /// chain, then an f64 value to store, then an address to store it to.
  STFIWX,

  /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
  /// load which sign-extends from a 32-bit integer value into the
  /// destination 64-bit register.
  LFIWAX,

  /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
  /// load which zero-extends from a 32-bit integer value into the
  /// destination 64-bit register.
  LFIWZX,

  /// GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an
  /// integer smaller than 64 bits into a VSR. The integer is zero-extended.
  /// This can be used for converting loaded integers to floating point.
  LXSIZX,

  /// STXSIX - The STXSI[bh]X instruction. The first operand is an input
  /// chain, then an f64 value to store, then an address to store it to,
  /// followed by a byte-width for the store.
  STXSIX,

  /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
  /// Maps directly to an lxvd2x instruction that will be followed by
  /// an xxswapd.
  LXVD2X,

  /// VSRC, CHAIN = LOAD_VEC_BE CHAIN, Ptr - Occurs only for little endian.
  /// Maps directly to one of lxvd2x/lxvw4x/lxvh8x/lxvb16x depending on
  /// the vector type, to load a vector in big-endian element order.
  LOAD_VEC_BE,

  /// VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a
  /// v2f32 value into the lower half of a VSR register.
  LD_VSX_LH,

  /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
  /// Maps directly to an stxvd2x instruction that will be preceded by
  /// an xxswapd.
  STXVD2X,

  /// CHAIN = STORE_VEC_BE CHAIN, VSRC, Ptr - Occurs only for little endian.
  /// Maps directly to one of stxvd2x/stxvw4x/stxvh8x/stxvb16x depending on
  /// the vector type, to store a vector in big-endian element order.
  STORE_VEC_BE,

  /// Store scalar integers from VSR.
  ST_VSR_SCAL_INT,

  /// QBRC, CHAIN = QVLFSb CHAIN, Ptr
  /// The 4xf32 load used for v4i1 constants.
  QVLFSb,

  /// ATOMIC_CMP_SWAP - the exact same as the target-independent nodes
  /// except they ensure that the compare input is zero-extended for
  /// sub-word versions because the atomic loads zero-extend.
  ATOMIC_CMP_SWAP_8, ATOMIC_CMP_SWAP_16,

  /// GPRC = TOC_ENTRY GA, TOC
  /// Loads the entry for GA from the TOC, where the TOC base is given by
  /// the last operand.
  TOC_ENTRY
};

} // end namespace PPCISD

/// Define some predicates that are used for node matching.
namespace PPC {

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                          SelectionDAG &DAG);

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                          SelectionDAG &DAG);

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction.
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                          SelectionDAG &DAG);

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                        unsigned ShuffleKind, SelectionDAG &DAG);

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                        unsigned ShuffleKind, SelectionDAG &DAG);

/// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGEW or VMRGOW instruction.
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                         unsigned ShuffleKind, SelectionDAG &DAG);
/// isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable
/// for an XXSLDWI instruction.
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          bool &Swap, bool IsLE);

/// isXXBRHShuffleMask - Return true if this is a shuffle mask suitable
/// for an XXBRH instruction.
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N);

/// isXXBRWShuffleMask - Return true if this is a shuffle mask suitable
/// for an XXBRW instruction.
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N);

/// isXXBRDShuffleMask - Return true if this is a shuffle mask suitable
/// for an XXBRD instruction.
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N);

/// isXXBRQShuffleMask - Return true if this is a shuffle mask suitable
/// for an XXBRQ instruction.
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N);

/// isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable
/// for an XXPERMDI instruction.
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                           bool &Swap, bool IsLE);

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
/// shift amount, otherwise return -1.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                        SelectionDAG &DAG);

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);

/// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
/// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
/// shuffle of v4f32/v4i32 vectors that just inserts one element from one
/// vector into the other. This function will also set a couple of
/// output parameters for how much the source vector needs to be shifted and
/// what byte number needs to be specified for the instruction to put the
/// element in the desired location of the target vector.
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                     unsigned &InsertAtByte, bool &Swap, bool IsLE);

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);

/// get_VSPLTI_elt - If this is a build_vector of constants which can be
/// formed by using a vspltis[bhw] instruction of the specified element
/// size, return the constant being splatted. The ByteSize field indicates
/// the number of bytes of each element [124] -> [bhw].
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
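
// For example, a v16i8 build_vector whose lanes are all 5 can be
// materialized with "vspltisb 5" (ByteSize == 1), since 5 fits the
// instruction's 5-bit signed immediate.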

/// If this is a qvaligni shuffle mask, return the shift
/// amount, otherwise return -1.
int isQVALIGNIShuffleMask(SDNode *N);

} // end namespace PPC

class PPCTargetLowering : public TargetLowering {
  const PPCSubtarget &Subtarget;

public:
  explicit PPCTargetLowering(const PPCTargetMachine &TM,
                             const PPCSubtarget &STI);

  /// getTargetNodeName() - This method returns the name of a target specific
  /// DAG node.
  const char *getTargetNodeName(unsigned Opcode) const override;

  bool isSelectSupported(SelectSupportKind Kind) const override {
    // PowerPC does not support scalar condition selects on vectors.
    return (Kind != SelectSupportKind::ScalarCondVectorVal);
  }

  /// getPreferredVectorAction - The code we generate when vector types are
  /// legalized by promoting the integer element type is often much worse
  /// than code we generate if we widen the type for applicable vector types.
  /// The issue with promoting is that the vector is scalarized: individual
  /// elements are promoted and then the vector is rebuilt. So, say we load
  /// a pair of v4i8's and shuffle them. This will turn into a mess of 8
  /// extending loads, moves back into VSRs (or memory ops if we don't have
  /// moves) and then the VPERM for the shuffle. All in all a very slow
  /// sequence.
  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
      const override {
    if (VT.getScalarSizeInBits() % 8 == 0)
      return TypeWidenVector;
    return TargetLoweringBase::getPreferredVectorAction(VT);
  }

  bool useSoftFloat() const override;

  bool hasSPE() const;

  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isCtlzFast() const override {
    return true;
  }

  bool hasAndNotCompare(SDValue) const override {
    return true;
  }

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return
      MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }

  void initializeSplitCSR(MachineBasicBlock *Entry) const override;

  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  /// getSetCCResultType - Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  /// Return true if the target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as a pre-indexed load / store address.
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                 SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;

  /// SelectAddressEVXRegReg - Given the specified address, check to see if
  /// it can be more efficiently represented as [r+imm].
  bool SelectAddressEVXRegReg(SDValue N, SDValue &Base, SDValue &Index,
                              SelectionDAG &DAG) const;

  /// SelectAddressRegReg - Given the specified address, check to see if it
  /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment
  /// is non-zero, only accept a displacement that is not suitable for
  /// [r+imm]. Returns false if it can be represented by [r+imm], which is
  /// preferred.
  bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                           SelectionDAG &DAG,
                           unsigned EncodingAlignment = 0) const;

  /// SelectAddressRegImm - Returns true if the address N can be represented
  /// by a base register plus a signed 16-bit displacement [r+imm], and if it
  /// is not better represented as reg+reg. If \p EncodingAlignment is
  /// non-zero, only accept displacements suitable for the instruction
  /// encoding requirement, i.e. multiples of 4 for DS form.
  bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                           SelectionDAG &DAG,
                           unsigned EncodingAlignment) const;
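
  // For example, DS-form instructions such as ld encode a 14-bit
  // displacement that is implicitly scaled by 4, so callers pass
  // EncodingAlignment == 4 to reject displacements that are not a
  // multiple of 4.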

  /// SelectAddressRegRegOnly - Given the specified address, force it to be
  /// represented as an indexed [r+r] operation.
  bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                               SelectionDAG &DAG) const;

  Sched::Preference getSchedulingPreference(SDNode *N) const override;

  /// LowerOperation - Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  /// ReplaceNodeResults - Replace the results of a node with an illegal
  /// result type with new values built out of custom code.
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;

  unsigned getRegisterByName(const char *RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override;

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return true;
  }

  Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;
  MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
                                      MachineBasicBlock *MBB,
                                      unsigned AtomicSize,
                                      unsigned BinOpcode,
                                      unsigned CmpOpcode = 0,
                                      unsigned CmpPred = 0) const;
  MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
                                              MachineBasicBlock *MBB,
                                              bool is8bit,
                                              unsigned Opcode,
                                              unsigned CmpOpcode = 0,
                                              unsigned CmpPred = 0) const;

  MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
  /// function arguments in the caller parameter area. This is the actual
  /// alignment, not its logarithm.
  unsigned getByValTypeAlignment(Type *Ty,
                                 const DataLayout &DL) const override;

  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector. If it is invalid, don't add anything to Ops.
  void LowerAsmOperandForConstraint(SDValue Op,
                                    std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "es")
      return InlineAsm::Constraint_es;
    else if (ConstraintCode == "o")
      return InlineAsm::Constraint_o;
    else if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    else if (ConstraintCode == "Z")
      return InlineAsm::Constraint_Z;
    else if (ConstraintCode == "Zy")
      return InlineAsm::Constraint_Zy;
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;

  /// isLegalICmpImmediate - Return true if the specified immediate is a
  /// legal icmp immediate, that is, the target has icmp instructions which
  /// can compare a register against the immediate without having to
  /// materialize the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  /// isLegalAddImmediate - Return true if the specified immediate is a
  /// legal add immediate, that is, the target has add instructions which
  /// can add a register and the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const override;

  /// isTruncateFree - Return true if it's free to truncate a value of
  /// type Ty1 to type Ty2. E.g., on PPC it's free to truncate an i64 value
  /// in register X1 to i32 by referencing its sub-register R1.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool isFPExtFree(EVT DestVT, EVT SrcVT) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  bool convertSelectOfConstantsToMath(EVT VT) const override {
    return true;
  }

  bool isDesirableToTransformToIntegerOp(unsigned Opc,
                                         EVT VT) const override {
    // Only handle float load/store pairs, because float (FPR) load/store
    // instructions take more cycles than integer (GPR) load/store on PPC.
    if (Opc != ISD::LOAD && Opc != ISD::STORE)
      return false;
    if (VT != MVT::f32 && VT != MVT::f64)
      return false;

    return true;
  }

  // Returns true if the address of the global is stored in a TOC entry.
  bool isAccessedAsGotIndirect(SDValue N) const;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                          const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  /// getOptimalMemOpType - Returns the target specific optimal type for load
  /// and store operations as a result of memset, memcpy, and memmove
  /// lowering. If DstAlign is zero, the destination alignment can satisfy
  /// any constraint. Similarly, if SrcAlign is zero there is no need to
  /// check it against the alignment requirement, probably because the
  /// source does not need to be loaded. If 'IsMemset' is true, it is
  /// expanding a memset. If 'ZeroMemset' is true, it is a memset of zero.
  /// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
  /// does not need to be loaded.
  /// It returns EVT::Other if the type should be determined using generic
  /// target-independent logic.
  EVT
  getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                      bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                      const AttributeList &FuncAttributes) const override;

  /// Is unaligned memory access allowed for the given type, and is it fast
  /// relative to software emulation.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace, unsigned Align = 1,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true; otherwise fmuladd is
  /// expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  // Should we expand the build vector with shuffles?
  bool
  shouldExpandBuildVectorWithShuffles(EVT VT,
                                      unsigned DefinedValues) const override;

  /// createFastISel - This method returns a target-specific FastISel object,
  /// or null if the target does not support "fast" instruction selection.
  FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                           const TargetLibraryInfo *LibInfo) const override;

  /// Returns true if an argument of type Ty needs to be passed in a
  /// contiguous block of registers in calling convention CallConv.
  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
    // We support any array type as a "consecutive" block in the parameter
    // save area. The element type defines the alignment requirement and
    // whether the argument should go in GPRs, FPRs, or VRs if available.
    //
    // Note that clang uses this capability both to implement the ELFv2
    // homogeneous float/vector aggregate ABI, and to avoid having to use
    // "byval" when passing aggregates that might fully fit in registers.
    return Ty->isArrayTy();
  }
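
  // For example, under the ELFv2 ABI an argument of type [4 x double] (a
  // homogeneous aggregate of doubles) is passed in four consecutive FPRs
  // when enough parameter registers are available.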

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  /// Override to support customized stack guard loading.
  bool useLoadStackGuardNode() const override;
  void insertSSPDeclarations(Module &M) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  unsigned getJumpTableEncoding() const override;
  bool isJumpTableRelative() const override;
  SDValue getPICJumpTableRelocBase(SDValue Table,
                                   SelectionDAG &DAG) const override;
  const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const override;

private:
  struct ReuseLoadInfo {
    SDValue Ptr;
    SDValue Chain;
    SDValue ResChain;
    MachinePointerInfo MPI;
    bool IsDereferenceable = false;
    bool IsInvariant = false;
    unsigned Alignment = 0;
    AAMDNodes AAInfo;
    const MDNode *Ranges = nullptr;

    ReuseLoadInfo() = default;

    MachineMemOperand::Flags MMOFlags() const {
      MachineMemOperand::Flags F = MachineMemOperand::MONone;
      if (IsDereferenceable)
        F |= MachineMemOperand::MODereferenceable;
      if (IsInvariant)
        F |= MachineMemOperand::MOInvariant;
      return F;
    }
  };

  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
                           SelectionDAG &DAG,
                           ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
  void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
                       SelectionDAG &DAG) const;

  void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                              SelectionDAG &DAG, const SDLoc &dl) const;
  SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
                                   const SDLoc &dl) const;

  bool directMoveIsProfitable(const SDValue &Op) const;
  SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
                                   const SDLoc &dl) const;

  SDValue LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
                               const SDLoc &dl) const;

  SDValue LowerTRUNCATEVector(SDValue Op, SelectionDAG &DAG) const;

  SDValue getFramePointerFrameIndex(SelectionDAG &DAG) const;
  SDValue getReturnAddrFrameIndex(SelectionDAG &DAG) const;

  bool
  IsEligibleForTailCallOptimization(SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    bool isVarArg,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG &DAG) const;

  bool
  IsEligibleForTailCallOptimization_64SVR4(
      SDValue Callee,
      CallingConv::ID CalleeCC,
      ImmutableCallSite CS,
      bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<ISD::InputArg> &Ins,
      SelectionDAG &DAG) const;

  SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
                                       SDValue Chain, SDValue &LROpOut,
                                       SDValue &FPOpOut,
                                       const SDLoc &dl) const;

  SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, SDValue GA) const;

  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                         const SDLoc &dl) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals) const;
  SDValue FinishCall(CallingConv::ID CallConv, const SDLoc &dl,
                     bool isTailCall, bool isVarArg, bool isPatchPoint,
                     bool hasNest, SelectionDAG &DAG,
                     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                     SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
                     SDValue &Callee, int SPDiff, unsigned NumBytes,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SmallVectorImpl<SDValue> &InVals,
                     ImmutableCallSite CS) const;

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
                            SelectionDAG &DAG, SDValue ArgVal,
                            const SDLoc &dl) const;

  SDValue LowerFormalArguments_Darwin(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
  SDValue LowerFormalArguments_64SVR4(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
  SDValue LowerFormalArguments_32SVR4(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;

  SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                     SDValue CallSeqStart,
                                     ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                                     const SDLoc &dl) const;

  SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee,
                           CallingConv::ID CallConv, bool isVarArg,
                           bool isTailCall, bool isPatchPoint,
                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                           const SmallVectorImpl<SDValue> &OutVals,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           const SDLoc &dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals,
                           ImmutableCallSite CS) const;
  SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee,
                           CallingConv::ID CallConv, bool isVarArg,
                           bool isTailCall, bool isPatchPoint,
                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                           const SmallVectorImpl<SDValue> &OutVals,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           const SDLoc &dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals,
                           ImmutableCallSite CS) const;
  SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee,
                           CallingConv::ID CallConv, bool isVarArg,
                           bool isTailCall, bool isPatchPoint,
                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                           const SmallVectorImpl<SDValue> &OutVals,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           const SDLoc &dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals,
                           ImmutableCallSite CS) const;
  SDValue LowerCall_AIX(SDValue Chain, SDValue Callee,
                        CallingConv::ID CallConv, bool isVarArg,
                        bool isTailCall, bool isPatchPoint,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        const SDLoc &dl, SelectionDAG &DAG,
                        SmallVectorImpl<SDValue> &InVals,
                        ImmutableCallSite CS) const;

  SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;

  SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineMUL(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineABS(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineVSelect(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineVReverseMemOP(ShuffleVectorSDNode *SVN, LSBaseSDNode *LSBase,
                               DAGCombinerInfo &DCI) const;

  /// ConvertSETCCToSubtract - looks at SETCC that compares integers. It
  /// replaces the SETCC with integer subtraction when (1) there is a legal
  /// way of doing it and (2) keeping the result of the comparison in a GPR
  /// has a performance benefit.
  SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &RefinementSteps, bool &UseOneConstNR,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &RefinementSteps) const override;
  unsigned combineRepeatedFPDivisors() const override;

  SDValue
  combineElementTruncationToVectorTruncation(SDNode *N,
                                             DAGCombinerInfo &DCI) const;

  /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be
  /// handled by the VINSERTH instruction introduced in ISA 3.0. This is
  /// essentially any shuffle of v8i16 vectors that just inserts one element
  /// from one vector into the other.
  SDValue lowerToVINSERTH(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

  /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be
  /// handled by the VINSERTB instruction introduced in ISA 3.0. This is
  /// essentially the v16i8 vector version of VINSERTH.
  SDValue lowerToVINSERTB(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

  // Return whether the call instruction can potentially be optimized to a
  // tail call. This will cause the optimizers to attempt to move or
  // duplicate return instructions to help enable tail call optimizations.
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool hasBitPreservingFPLogic(EVT VT) const override;
  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
}; // end class PPCTargetLowering

namespace PPC {

FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                         const TargetLibraryInfo *LibInfo);

} // end namespace PPC

bool isIntS16Immediate(SDNode *N, int16_t &Imm);
bool isIntS16Immediate(SDValue Op, int16_t &Imm);

} // end namespace llvm

#endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H