//===-- PPCFastISel.cpp - PowerPC FastISel implementation -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the PowerPC-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// PPCGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"

//===----------------------------------------------------------------------===//
//
// TBD:
//   fastLowerArguments: Handle simple cases.
//   PPCMaterializeGV: Handle TLS.
//   SelectCall: Handle function pointers.
//   SelectCall: Handle multi-register return values.
//   SelectCall: Optimize away nops for local calls.
//   processCallArgs: Handle bit-converted arguments.
//   finishCall: Handle multi-register return values.
//   PPCComputeAddress: Handle parameter references as FrameIndex's.
//   PPCEmitCmp: Handle immediate as operand 1.
//   SelectCall: Handle small byval arguments.
//   SelectIntrinsicCall: Implement.
//   SelectSelect: Implement.
//   Consider factoring isTypeLegal into the base class.
//   Implement switches and jump tables.
//
//===----------------------------------------------------------------------===//

using namespace llvm;

#define DEBUG_TYPE "ppcfastisel"

namespace {

typedef struct Address {
  enum {
    RegBase,
    FrameIndexBase
  } BaseType;

  union {
    unsigned Reg;
    int FI;
  } Base;

  long Offset;

  // Innocuous defaults for our address.
  Address()
   : BaseType(RegBase), Offset(0) {
     Base.Reg = 0;
   }
} Address;

class PPCFastISel final : public FastISel {

  const TargetMachine &TM;
  const PPCSubtarget *PPCSubTarget;
  PPCFunctionInfo *PPCFuncInfo;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  LLVMContext *Context;

  public:
    explicit PPCFastISel(FunctionLoweringInfo &FuncInfo,
                         const TargetLibraryInfo *LibInfo)
        : FastISel(FuncInfo, LibInfo), TM(FuncInfo.MF->getTarget()),
          PPCSubTarget(&FuncInfo.MF->getSubtarget<PPCSubtarget>()),
          PPCFuncInfo(FuncInfo.MF->getInfo<PPCFunctionInfo>()),
          TII(*PPCSubTarget->getInstrInfo()),
          TLI(*PPCSubTarget->getTargetLowering()),
          Context(&FuncInfo.Fn->getContext()) {}

  // Backend specific FastISel code.
  private:
    bool fastSelectInstruction(const Instruction *I) override;
    unsigned fastMaterializeConstant(const Constant *C) override;
    unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
    bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                             const LoadInst *LI) override;
    bool fastLowerArguments() override;
    unsigned fastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override;
    unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             uint64_t Imm);
    unsigned fastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill);
    unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             unsigned Op1, bool Op1IsKill);

    bool fastLowerCall(CallLoweringInfo &CLI) override;

  // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectIToFP(const Instruction *I, bool IsSigned);
    bool SelectFPToI(const Instruction *I, bool IsSigned);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);

  // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool isValueAvailable(const Value *V) const;
    bool isVSFRCRegClass(const TargetRegisterClass *RC) const {
      return RC->getID() == PPC::VSFRCRegClassID;
    }
    bool isVSSRCRegClass(const TargetRegisterClass *RC) const {
      return RC->getID() == PPC::VSSRCRegClassID;
    }
    unsigned copyRegToRegClass(const TargetRegisterClass *ToRC,
                               unsigned SrcReg, unsigned Flag = 0,
                               unsigned SubReg = 0) {
      unsigned TmpReg = createResultReg(ToRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), TmpReg).addReg(SrcReg, Flag, SubReg);
      return TmpReg;
    }
    bool PPCEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt, unsigned DestReg,
                    const PPC::Predicate Pred);
    bool PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                     const TargetRegisterClass *RC, bool IsZExt = true,
                     unsigned FP64LoadOpc = PPC::LFD);
    bool PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr);
    bool PPCComputeAddress(const Value *Obj, Address &Addr);
    void PPCSimplifyAddress(Address &Addr, bool &UseOffset,
                            unsigned &IndexReg);
    bool PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                       unsigned DestReg, bool IsZExt);
    unsigned PPCMaterializeFP(const ConstantFP *CFP, MVT VT);
    unsigned PPCMaterializeGV(const GlobalValue *GV, MVT VT);
    unsigned PPCMaterializeInt(const ConstantInt *CI, MVT VT,
                               bool UseSExt = true);
    unsigned PPCMaterialize32BitInt(int64_t Imm,
                                    const TargetRegisterClass *RC);
    unsigned PPCMaterialize64BitInt(int64_t Imm,
                                    const TargetRegisterClass *RC);
    unsigned PPCMoveToIntReg(const Instruction *I, MVT VT,
                             unsigned SrcReg, bool IsSigned);
    unsigned PPCMoveToFPReg(MVT VT, unsigned SrcReg, bool IsSigned);

  // Call handling routines.
  private:
    bool processCallArgs(SmallVectorImpl<Value *> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool IsVarArg);
    bool finishCall(MVT RetVT, CallLoweringInfo &CLI, unsigned &NumBytes);

  private:
  #include "PPCGenFastISel.inc"

};

} // end anonymous namespace

static Optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // These are not representable with any single compare.
    case CmpInst::FCMP_FALSE:
    case CmpInst::FCMP_TRUE:
    // The major concern with the following six cases is the NaN result. The
    // comparison result consists of 4 bits, indicating lt, eq, gt and un
    // (unordered), only one of which will be set. The result is generated by
    // the fcmpu instruction. However, the bc instruction only inspects one of
    // the first 3 bits, so when un is set, bc may jump to an undesired place.
    //
    // More specifically, if we expect an unordered comparison and un is set,
    // we expect to always go to the true branch; in that case UEQ, UGT and
    // ULT still give false, which is undesired; but UNE, UGE and ULE happen
    // to give true, since they are tested by inspecting !eq, !lt and !gt,
    // respectively.
    //
    // Similarly, for an ordered comparison, when un is set we always expect
    // the result to be false. In that case OGT, OLT and OEQ are good, since
    // they actually test gt, lt and eq, which are false. But OGE, OLE
    // and ONE are tested through !lt, !gt and !eq, and these are true.
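    //
    // For example, consider FCMP_OLE with a NaN operand: the ordered compare
    // must yield false, but fcmpu sets only the un bit, so a bc testing !gt
    // (PRED_LE) would see gt clear and wrongly take the true branch. Hence
    // these six predicates are rejected below.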
    case CmpInst::FCMP_UEQ:
    case CmpInst::FCMP_UGT:
    case CmpInst::FCMP_ULT:
    case CmpInst::FCMP_OGE:
    case CmpInst::FCMP_OLE:
    case CmpInst::FCMP_ONE:
    default:
      return Optional<PPC::Predicate>();

    case CmpInst::FCMP_OEQ:
    case CmpInst::ICMP_EQ:
      return PPC::PRED_EQ;

    case CmpInst::FCMP_OGT:
    case CmpInst::ICMP_UGT:
    case CmpInst::ICMP_SGT:
      return PPC::PRED_GT;

    case CmpInst::FCMP_UGE:
    case CmpInst::ICMP_UGE:
    case CmpInst::ICMP_SGE:
      return PPC::PRED_GE;

    case CmpInst::FCMP_OLT:
    case CmpInst::ICMP_ULT:
    case CmpInst::ICMP_SLT:
      return PPC::PRED_LT;

    case CmpInst::FCMP_ULE:
    case CmpInst::ICMP_ULE:
    case CmpInst::ICMP_SLE:
      return PPC::PRED_LE;

    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return PPC::PRED_NE;

    case CmpInst::FCMP_ORD:
      return PPC::PRED_NU;

    case CmpInst::FCMP_UNO:
      return PPC::PRED_UN;
  }
}

// Determine whether the type Ty is simple enough to be handled by
// fast-isel, and return its equivalent machine type in VT.
// FIXME: Copied directly from ARM -- factor into base class?
bool PPCFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT Evt = TLI.getValueType(DL, Ty, true);

  // Only handle simple types.
  if (Evt == MVT::Other || !Evt.isSimple()) return false;
  VT = Evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

// Determine whether the type Ty is simple enough to be handled by
// fast-isel as a load target, and return its equivalent machine type in VT.
bool PPCFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign- or zero-extended to a basic operation,
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) {
    return true;
  }

  return false;
}

bool PPCFastISel::isValueAvailable(const Value *V) const {
  if (!isa<Instruction>(V))
    return true;

  const auto *I = cast<Instruction>(V);
  return FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB;
}

// Given a value Obj, create an Address object Addr that represents its
// address. Return false if we can't handle it.
bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  switch (Opcode) {
    default:
      break;
    case Instruction::BitCast:
      // Look through bitcasts.
      return PPCComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
      // Look past no-op inttoptrs.
      if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
          TLI.getPointerTy(DL))
        return PPCComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::PtrToInt:
      // Look past no-op ptrtoints.
      if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
        return PPCComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      long TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator II = U->op_begin() + 1, IE = U->op_end();
           II != IE; ++II, ++GTI) {
        const Value *Op = *II;
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          const StructLayout *SL = DL.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (canFoldAddIntoGEP(U, Op)) {
              // A compatible add with a constant operand. Fold the constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported.
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (PPCComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // FIXME: References to parameters fall through to the behavior
  // below. They should be able to reference a frame index since
  // they are stored to the stack, so we can get "ld rx, offset(r1)"
  // instead of "addi ry, r1, offset / ld rx, 0(ry)". Obj will
  // just contain the parameter. Try to handle this with a FI.

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0)
    Addr.Base.Reg = getRegForValue(Obj);

  // Prevent assignment of base register to X0, which is inappropriate
  // for loads and stores alike.
  if (Addr.Base.Reg != 0)
    MRI.setRegClass(Addr.Base.Reg, &PPC::G8RC_and_G8RC_NOX0RegClass);

  return Addr.Base.Reg != 0;
}

// Fix up some addresses that can't be used directly. For example, if
// an offset won't fit in an instruction field, we may need to move it
// into an index register.
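// For example, "lwz r3, 100000(r4)" is not encodable, since the D-form
// displacement field is a 16-bit signed immediate; the offset is instead
// materialized into an index register so the X-form "lwzx r3, r4, r5"
// can be used.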
void PPCFastISel::PPCSimplifyAddress(Address &Addr, bool &UseOffset,
                                     unsigned &IndexReg) {

  // Check whether the offset fits in the instruction field.
  if (!isInt<16>(Addr.Offset))
    UseOffset = false;

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (!UseOffset && Addr.BaseType == Address::FrameIndexBase) {
    unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8),
            ResultReg).addFrameIndex(Addr.Base.FI).addImm(0);
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  if (!UseOffset) {
    IntegerType *OffsetTy = Type::getInt64Ty(*Context);
    const ConstantInt *Offset =
      ConstantInt::getSigned(OffsetTy, (int64_t)(Addr.Offset));
    IndexReg = PPCMaterializeInt(Offset, MVT::i64);
    assert(IndexReg && "Unexpected error in PPCMaterializeInt!");
  }
}

// Emit a load instruction if possible, returning true if we succeeded,
// otherwise false. See commentary below for how the register class of
// the load is determined.
bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                              const TargetRegisterClass *RC,
                              bool IsZExt, unsigned FP64LoadOpc) {
  unsigned Opc;
  bool UseOffset = true;
  bool HasSPE = PPCSubTarget->hasSPE();

  // If ResultReg is given, it determines the register class of the load.
  // Otherwise, RC is the register class to use. If the result of the
  // load isn't anticipated in this block, both may be zero, in which
  // case we must make a conservative guess. In particular, don't assign
  // R0 or X0 to the result register, as the result may be used in a load,
  // store, add-immediate, or isel that won't permit this. (Though
  // perhaps the spill and reload of live-exit values would handle this?)
  const TargetRegisterClass *UseRC =
    (ResultReg ? MRI.getRegClass(ResultReg) :
     (RC ? RC :
      (VT == MVT::f64 ? (HasSPE ? &PPC::SPERCRegClass : &PPC::F8RCRegClass) :
       (VT == MVT::f32 ? (HasSPE ? &PPC::SPE4RCRegClass : &PPC::F4RCRegClass) :
        (VT == MVT::i64 ? &PPC::G8RC_and_G8RC_NOX0RegClass :
         &PPC::GPRC_and_GPRC_NOR0RegClass)))));

  bool Is32BitInt = UseRC->hasSuperClassEq(&PPC::GPRCRegClass);

  switch (VT.SimpleTy) {
    default: // e.g., vector types not handled
      return false;
    case MVT::i8:
      Opc = Is32BitInt ? PPC::LBZ : PPC::LBZ8;
      break;
    case MVT::i16:
      Opc = (IsZExt ? (Is32BitInt ? PPC::LHZ : PPC::LHZ8)
                    : (Is32BitInt ? PPC::LHA : PPC::LHA8));
      break;
    case MVT::i32:
      Opc = (IsZExt ? (Is32BitInt ? PPC::LWZ : PPC::LWZ8)
                    : (Is32BitInt ? PPC::LWA_32 : PPC::LWA));
      if ((Opc == PPC::LWA || Opc == PPC::LWA_32) && ((Addr.Offset & 3) != 0))
        UseOffset = false;
      break;
    case MVT::i64:
      Opc = PPC::LD;
      assert(UseRC->hasSuperClassEq(&PPC::G8RCRegClass) &&
             "64-bit load with 32-bit target??");
      UseOffset = ((Addr.Offset & 3) == 0);
      break;
    case MVT::f32:
      Opc = PPCSubTarget->hasSPE() ? PPC::SPELWZ : PPC::LFS;
      break;
    case MVT::f64:
      Opc = FP64LoadOpc;
      break;
  }

  // If necessary, materialize the offset into a register and use
  // the indexed form. Also handle stack pointers with special needs.
  unsigned IndexReg = 0;
  PPCSimplifyAddress(Addr, UseOffset, IndexReg);

  // If this is a potential VSX load with an offset of 0, a VSX indexed load
  // can be used.
  bool IsVSSRC = isVSSRCRegClass(UseRC);
  bool IsVSFRC = isVSFRCRegClass(UseRC);
  bool Is32VSXLoad = IsVSSRC && Opc == PPC::LFS;
  bool Is64VSXLoad = IsVSFRC && Opc == PPC::LFD;
  if ((Is32VSXLoad || Is64VSXLoad) &&
      (Addr.BaseType != Address::FrameIndexBase) && UseOffset &&
      (Addr.Offset == 0)) {
    UseOffset = false;
  }

  if (ResultReg == 0)
    ResultReg = createResultReg(UseRC);

  // Note: If we still have a frame index here, we know the offset is
  // in range, as otherwise PPCSimplifyAddress would have converted it
  // into a RegBase.
  if (Addr.BaseType == Address::FrameIndexBase) {
    // VSX only provides an indexed load.
    if (Is32VSXLoad || Is64VSXLoad) return false;

    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*FuncInfo.MF, Addr.Base.FI,
                                          Addr.Offset),
        MachineMemOperand::MOLoad, MFI.getObjectSize(Addr.Base.FI),
        MFI.getObjectAlignment(Addr.Base.FI));

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO);

  // Base reg with offset in range.
  } else if (UseOffset) {
    // VSX only provides an indexed load.
    if (Is32VSXLoad || Is64VSXLoad) return false;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addImm(Addr.Offset).addReg(Addr.Base.Reg);

  // Indexed form.
  } else {
    // Get the RR opcode corresponding to the RI one. FIXME: It would be
    // preferable to use the ImmToIdxMap from PPCRegisterInfo.cpp, but it
    // is hard to get at.
    switch (Opc) {
      default: llvm_unreachable("Unexpected opcode!");
      case PPC::LBZ:    Opc = PPC::LBZX;    break;
      case PPC::LBZ8:   Opc = PPC::LBZX8;   break;
      case PPC::LHZ:    Opc = PPC::LHZX;    break;
      case PPC::LHZ8:   Opc = PPC::LHZX8;   break;
      case PPC::LHA:    Opc = PPC::LHAX;    break;
      case PPC::LHA8:   Opc = PPC::LHAX8;   break;
      case PPC::LWZ:    Opc = PPC::LWZX;    break;
      case PPC::LWZ8:   Opc = PPC::LWZX8;   break;
      case PPC::LWA:    Opc = PPC::LWAX;    break;
      case PPC::LWA_32: Opc = PPC::LWAX_32; break;
      case PPC::LD:     Opc = PPC::LDX;     break;
      case PPC::LFS:    Opc = IsVSSRC ? PPC::LXSSPX : PPC::LFSX; break;
      case PPC::LFD:    Opc = IsVSFRC ? PPC::LXSDX : PPC::LFDX;  break;
      case PPC::EVLDD:  Opc = PPC::EVLDDX;  break;
      case PPC::SPELWZ: Opc = PPC::SPELWZX; break;
    }

    auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                       ResultReg);

    // If we have an index register defined we use it in the load instruction;
    // otherwise we use X0 as the base, which makes the instruction use zero
    // in the effective-address computation regardless of the register's
    // content.
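    // (In the X-form encoding EA = (RA|0) + (RB); encoding X0 in the RA slot
    // is read as the literal value zero rather than the register's contents.)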
    if (IndexReg)
      MIB.addReg(Addr.Base.Reg).addReg(IndexReg);
    else
      MIB.addReg(PPC::ZERO8).addReg(Addr.Base.Reg);
  }

  return true;
}

// Attempt to fast-select a load instruction.
bool PPCFastISel::SelectLoad(const Instruction *I) {
  // FIXME: No atomic loads are supported.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!PPCComputeAddress(I->getOperand(0), Addr))
    return false;

  // Look at the currently assigned register for this instruction
  // to determine the required register class. This is necessary
  // to constrain RA from using R0/X0 when this is not legal.
  unsigned AssignedReg = FuncInfo.ValueMap[I];
  const TargetRegisterClass *RC =
    AssignedReg ? MRI.getRegClass(AssignedReg) : nullptr;

  unsigned ResultReg = 0;
  if (!PPCEmitLoad(VT, ResultReg, Addr, RC, true,
                   PPCSubTarget->hasSPE() ? PPC::EVLDD : PPC::LFD))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}

// Emit a store instruction to store SrcReg at Addr.
bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
  assert(SrcReg && "Nothing to store!");
  unsigned Opc;
  bool UseOffset = true;

  const TargetRegisterClass *RC = MRI.getRegClass(SrcReg);
  bool Is32BitInt = RC->hasSuperClassEq(&PPC::GPRCRegClass);

  switch (VT.SimpleTy) {
    default: // e.g., vector types not handled
      return false;
    case MVT::i8:
      Opc = Is32BitInt ? PPC::STB : PPC::STB8;
      break;
    case MVT::i16:
      Opc = Is32BitInt ? PPC::STH : PPC::STH8;
      break;
    case MVT::i32:
      assert(Is32BitInt && "Not GPRC for i32??");
      Opc = PPC::STW;
      break;
    case MVT::i64:
      Opc = PPC::STD;
      UseOffset = ((Addr.Offset & 3) == 0);
      break;
    case MVT::f32:
      Opc = PPCSubTarget->hasSPE() ? PPC::SPESTW : PPC::STFS;
      break;
    case MVT::f64:
      Opc = PPCSubTarget->hasSPE() ? PPC::EVSTDD : PPC::STFD;
      break;
  }

  // If necessary, materialize the offset into a register and use
  // the indexed form. Also handle stack pointers with special needs.
  unsigned IndexReg = 0;
  PPCSimplifyAddress(Addr, UseOffset, IndexReg);

  // If this is a potential VSX store with an offset of 0, a VSX indexed store
  // can be used.
  bool IsVSSRC = isVSSRCRegClass(RC);
  bool IsVSFRC = isVSFRCRegClass(RC);
  bool Is32VSXStore = IsVSSRC && Opc == PPC::STFS;
  bool Is64VSXStore = IsVSFRC && Opc == PPC::STFD;
  if ((Is32VSXStore || Is64VSXStore) &&
      (Addr.BaseType != Address::FrameIndexBase) && UseOffset &&
      (Addr.Offset == 0)) {
    UseOffset = false;
  }

  // Note: If we still have a frame index here, we know the offset is
  // in range, as otherwise PPCSimplifyAddress would have converted it
  // into a RegBase.
  if (Addr.BaseType == Address::FrameIndexBase) {
    // VSX only provides an indexed store.
    if (Is32VSXStore || Is64VSXStore) return false;

    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*FuncInfo.MF, Addr.Base.FI,
                                          Addr.Offset),
        MachineMemOperand::MOStore, MFI.getObjectSize(Addr.Base.FI),
        MFI.getObjectAlignment(Addr.Base.FI));

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
        .addReg(SrcReg)
        .addImm(Addr.Offset)
        .addFrameIndex(Addr.Base.FI)
        .addMemOperand(MMO);

  // Base reg with offset in range.
  } else if (UseOffset) {
    // VSX only provides an indexed store.
    if (Is32VSXStore || Is64VSXStore)
      return false;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
      .addReg(SrcReg).addImm(Addr.Offset).addReg(Addr.Base.Reg);

  // Indexed form.
  } else {
    // Get the RR opcode corresponding to the RI one. FIXME: It would be
    // preferable to use the ImmToIdxMap from PPCRegisterInfo.cpp, but it
    // is hard to get at.
    switch (Opc) {
      default: llvm_unreachable("Unexpected opcode!");
      case PPC::STB:  Opc = PPC::STBX;  break;
      case PPC::STH:  Opc = PPC::STHX;  break;
      case PPC::STW:  Opc = PPC::STWX;  break;
      case PPC::STB8: Opc = PPC::STBX8; break;
      case PPC::STH8: Opc = PPC::STHX8; break;
      case PPC::STW8: Opc = PPC::STWX8; break;
      case PPC::STD:  Opc = PPC::STDX;  break;
      case PPC::STFS: Opc = IsVSSRC ? PPC::STXSSPX : PPC::STFSX; break;
      case PPC::STFD: Opc = IsVSFRC ? PPC::STXSDX : PPC::STFDX;  break;
      case PPC::EVSTDD: Opc = PPC::EVSTDDX; break;
      case PPC::SPESTW: Opc = PPC::SPESTWX; break;
    }

    auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
        .addReg(SrcReg);

    // If we have an index register defined we use it in the store instruction;
    // otherwise we use X0 as the base, which makes the instruction use zero
    // in the effective-address computation regardless of the register's
    // content.
    if (IndexReg)
      MIB.addReg(Addr.Base.Reg).addReg(IndexReg);
    else
      MIB.addReg(PPC::ZERO8).addReg(Addr.Base.Reg);
  }

  return true;
}

// Attempt to fast-select a store instruction.
bool PPCFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // FIXME: No atomic stores are supported.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(Op0->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0)
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!PPCComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!PPCEmitStore(VT, SrcReg, Addr))
    return false;

  return true;
}

// Attempt to fast-select a branch instruction.
bool PPCFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *BrBB = FuncInfo.MBB;
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // For now, just try the simplest case where it's fed by a compare.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (isValueAvailable(CI)) {
      Optional<PPC::Predicate> OptPPCPred = getComparePred(CI->getPredicate());
      if (!OptPPCPred)
        return false;

      PPC::Predicate PPCPred = OptPPCPred.getValue();

      // Take advantage of fall-through opportunities.
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        PPCPred = PPC::InvertPredicate(PPCPred);
      }

      unsigned CondReg = createResultReg(&PPC::CRRCRegClass);

      if (!PPCEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
                      CondReg, PPCPred))
        return false;

      BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCC))
          .addImm(PPCSubTarget->hasSPE() ? PPC::PRED_SPE : PPCPred)
          .addReg(CondReg)
          .addMBB(TBB);
      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (const ConstantInt *CI =
                 dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    fastEmitBranch(Target, DbgLoc);
    return true;
  }

  // FIXME: ARM looks for a case where the block containing the compare
  // has been split from the block containing the branch. If this happens,
  // there is a vreg available containing the result of the compare. I'm
  // not sure we can do much, as we've lost the predicate information with
  // the compare instruction -- we have a 4-bit CR but don't know which bit
  // to test here.

  return false;
}

// Attempt to emit a compare of the two source values. Signed and unsigned
// comparisons are supported. Return false if we can't handle it.
bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2,
                             bool IsZExt, unsigned DestReg,
                             const PPC::Predicate Pred) {
  Type *Ty = SrcValue1->getType();
  EVT SrcEVT = TLI.getValueType(DL, Ty, true);
  if (!SrcEVT.isSimple())
    return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  if (SrcVT == MVT::i1 && PPCSubTarget->useCRBits())
    return false;

  // See if operand 2 is an immediate encodeable in the compare.
  // FIXME: Operands are not in canonical order at -O0, so an immediate
  // operand in position 1 is a lost opportunity for now. We are
  // similar to ARM in this regard.
  long Imm = 0;
  bool UseImm = false;
  const bool HasSPE = PPCSubTarget->hasSPE();

  // Only 16-bit integer constants can be represented in compares for
  // PowerPC. Others will be materialized into a register.
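  // (For example, "cmpwi rX, 100" encodes directly, but a compare against
  // 100000 exceeds the 16-bit immediate field and needs the constant loaded
  // into a scratch register first.)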
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(SrcValue2)) {
    if (SrcVT == MVT::i64 || SrcVT == MVT::i32 || SrcVT == MVT::i16 ||
        SrcVT == MVT::i8 || SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (IsZExt) ? (long)CIVal.getZExtValue() : (long)CIVal.getSExtValue();
      if ((IsZExt && isUInt<16>(Imm)) || (!IsZExt && isInt<16>(Imm)))
        UseImm = true;
    }
  }

  unsigned SrcReg1 = getRegForValue(SrcValue1);
  if (SrcReg1 == 0)
    return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(SrcValue2);
    if (SrcReg2 == 0)
      return false;
  }

  unsigned CmpOpc;
  bool NeedsExt = false;

  auto RC1 = MRI.getRegClass(SrcReg1);
  auto RC2 = SrcReg2 != 0 ? MRI.getRegClass(SrcReg2) : nullptr;

  switch (SrcVT.SimpleTy) {
    default: return false;
    case MVT::f32:
      if (HasSPE) {
        switch (Pred) {
          default: return false;
          case PPC::PRED_EQ:
            CmpOpc = PPC::EFSCMPEQ;
            break;
          case PPC::PRED_LT:
            CmpOpc = PPC::EFSCMPLT;
            break;
          case PPC::PRED_GT:
            CmpOpc = PPC::EFSCMPGT;
            break;
        }
      } else {
        CmpOpc = PPC::FCMPUS;
        if (isVSSRCRegClass(RC1))
          SrcReg1 = copyRegToRegClass(&PPC::F4RCRegClass, SrcReg1);
        if (RC2 && isVSSRCRegClass(RC2))
          SrcReg2 = copyRegToRegClass(&PPC::F4RCRegClass, SrcReg2);
      }
      break;
    case MVT::f64:
      if (HasSPE) {
        switch (Pred) {
          default: return false;
          case PPC::PRED_EQ:
            CmpOpc = PPC::EFDCMPEQ;
            break;
          case PPC::PRED_LT:
            CmpOpc = PPC::EFDCMPLT;
            break;
          case PPC::PRED_GT:
            CmpOpc = PPC::EFDCMPGT;
            break;
        }
      } else if (isVSFRCRegClass(RC1) || (RC2 && isVSFRCRegClass(RC2))) {
        CmpOpc = PPC::XSCMPUDP;
      } else {
        CmpOpc = PPC::FCMPUD;
      }
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      NeedsExt = true;
      LLVM_FALLTHROUGH;
    case MVT::i32:
      if (!UseImm)
        CmpOpc = IsZExt ? PPC::CMPLW : PPC::CMPW;
      else
        CmpOpc = IsZExt ? PPC::CMPLWI : PPC::CMPWI;
      break;
    case MVT::i64:
      if (!UseImm)
        CmpOpc = IsZExt ? PPC::CMPLD : PPC::CMPD;
      else
        CmpOpc = IsZExt ? PPC::CMPLDI : PPC::CMPDI;
      break;
  }

  if (NeedsExt) {
    unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
    if (!PPCEmitIntExt(SrcVT, SrcReg1, MVT::i32, ExtReg, IsZExt))
      return false;
    SrcReg1 = ExtReg;

    if (!UseImm) {
      unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
      if (!PPCEmitIntExt(SrcVT, SrcReg2, MVT::i32, ExtReg, IsZExt))
        return false;
      SrcReg2 = ExtReg;
    }
  }

  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc), DestReg)
      .addReg(SrcReg1).addReg(SrcReg2);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc), DestReg)
      .addReg(SrcReg1).addImm(Imm);

  return true;
}

// Attempt to fast-select a floating-point extend instruction.
bool PPCFastISel::SelectFPExt(const Instruction *I) {
  Value *Src  = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::f32 || DestVT != MVT::f64)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg)
    return false;

  // No code is generated for a FP extend.
  updateValueMap(I, SrcReg);
  return true;
}

// Attempt to fast-select a floating-point truncate instruction.
bool PPCFastISel::SelectFPTrunc(const Instruction *I) {
  Value *Src  = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::f64 || DestVT != MVT::f32)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg)
    return false;

  // Round the result to single precision.
  unsigned DestReg;
  auto RC = MRI.getRegClass(SrcReg);
  if (PPCSubTarget->hasSPE()) {
    DestReg = createResultReg(&PPC::SPE4RCRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(PPC::EFSCFD), DestReg)
        .addReg(SrcReg);
  } else if (isVSFRCRegClass(RC)) {
    DestReg = createResultReg(&PPC::VSSRCRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(PPC::XSRSP), DestReg)
        .addReg(SrcReg);
  } else {
    DestReg = createResultReg(&PPC::F4RCRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(PPC::FRSP), DestReg)
        .addReg(SrcReg);
  }

  updateValueMap(I, DestReg);
  return true;
}

// Move an i32 or i64 value in a GPR to an f64 value in an FPR.
// FIXME: When direct register moves are implemented (see PowerISA 2.07),
// those should be used instead of moving via a stack slot when the
// subtarget permits.
// FIXME: The code here is sloppy for the 4-byte case. Can use a 4-byte
// stack slot and 4-byte store/load sequence. Or just sext the 4-byte
// case to 8 bytes which produces tighter code but wastes stack space.
unsigned PPCFastISel::PPCMoveToFPReg(MVT SrcVT, unsigned SrcReg,
                                     bool IsSigned) {

  // If necessary, extend 32-bit int to 64-bit.
  if (SrcVT == MVT::i32) {
    unsigned TmpReg = createResultReg(&PPC::G8RCRegClass);
    if (!PPCEmitIntExt(MVT::i32, SrcReg, MVT::i64, TmpReg, !IsSigned))
      return 0;
    SrcReg = TmpReg;
  }

  // Get a stack slot 8 bytes wide, aligned on an 8-byte boundary.
  Address Addr;
  Addr.BaseType = Address::FrameIndexBase;
  Addr.Base.FI = MFI.CreateStackObject(8, 8, false);

  // Store the value from the GPR.
  if (!PPCEmitStore(MVT::i64, SrcReg, Addr))
    return 0;

  // Load the integer value into an FPR. The kind of load used depends
  // on a number of conditions.
  unsigned LoadOpc = PPC::LFD;

  if (SrcVT == MVT::i32) {
    if (!IsSigned) {
      LoadOpc = PPC::LFIWZX;
      Addr.Offset = (PPCSubTarget->isLittleEndian()) ? 0 : 4;
    } else if (PPCSubTarget->hasLFIWAX()) {
      LoadOpc = PPC::LFIWAX;
      Addr.Offset = (PPCSubTarget->isLittleEndian()) ? 0 : 4;
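      // (The i64 store puts the 32-bit payload in the low word of the 8-byte
      // slot: byte offset 4 on big-endian targets, offset 0 on little-endian.)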
    }
  }

  const TargetRegisterClass *RC = &PPC::F8RCRegClass;
  unsigned ResultReg = 0;
  if (!PPCEmitLoad(MVT::f64, ResultReg, Addr, RC, !IsSigned, LoadOpc))
    return 0;

  return ResultReg;
}

// Attempt to fast-select an integer-to-floating-point conversion.
// FIXME: Once fast-isel has better support for VSX, conversions using
// direct moves should be implemented.
bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
  MVT DstVT;
  Type *DstTy = I->getType();
  if (!isTypeLegal(DstTy, DstVT))
    return false;

  if (DstVT != MVT::f32 && DstVT != MVT::f64)
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true);
  if (!SrcEVT.isSimple())
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();

  if (SrcVT != MVT::i8  && SrcVT != MVT::i16 &&
      SrcVT != MVT::i32 && SrcVT != MVT::i64)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0)
    return false;

  // Shortcut for SPE. Doesn't need to store/load, since it's all in the GPRs.
  if (PPCSubTarget->hasSPE()) {
    unsigned Opc;
    if (DstVT == MVT::f32)
      Opc = IsSigned ? PPC::EFSCFSI : PPC::EFSCFUI;
    else
      Opc = IsSigned ? PPC::EFDCFSI : PPC::EFDCFUI;

    unsigned DestReg = createResultReg(&PPC::SPERCRegClass);
    // Generate the convert.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
      .addReg(SrcReg);
    updateValueMap(I, DestReg);
    return true;
  }

  // We can only lower an unsigned convert if we have the newer
  // floating-point conversion operations.
  if (!IsSigned && !PPCSubTarget->hasFPCVT())
    return false;

  // FIXME: For now we require the newer floating-point conversion operations
  // (which are present only on P7 and A2 server models) when converting
  // to single-precision float. Otherwise we have to generate a lot of
  // fiddly code to avoid double rounding. If necessary, the fiddly code
  // can be found in PPCTargetLowering::LowerINT_TO_FP().
  if (DstVT == MVT::f32 && !PPCSubTarget->hasFPCVT())
    return false;

  // Extend the input if necessary.
  if (SrcVT == MVT::i8 || SrcVT == MVT::i16) {
    unsigned TmpReg = createResultReg(&PPC::G8RCRegClass);
    if (!PPCEmitIntExt(SrcVT, SrcReg, MVT::i64, TmpReg, !IsSigned))
      return false;
    SrcVT = MVT::i64;
    SrcReg = TmpReg;
  }

  // Move the integer value to an FPR.
  unsigned FPReg = PPCMoveToFPReg(SrcVT, SrcReg, IsSigned);
  if (FPReg == 0)
    return false;

  // Determine the opcode for the conversion.
  const TargetRegisterClass *RC = &PPC::F8RCRegClass;
  unsigned DestReg = createResultReg(RC);
  unsigned Opc;

  if (DstVT == MVT::f32)
    Opc = IsSigned ? PPC::FCFIDS : PPC::FCFIDUS;
  else
    Opc = IsSigned ? PPC::FCFID : PPC::FCFIDU;

  // Generate the convert.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
    .addReg(FPReg);

  updateValueMap(I, DestReg);
  return true;
}

// Move the floating-point value in SrcReg into an integer destination
// register, and return the register (or zero if we can't handle it).
// FIXME: When direct register moves are implemented (see PowerISA 2.07),
// those should be used instead of moving via a stack slot when the
// subtarget permits.
unsigned PPCFastISel::PPCMoveToIntReg(const Instruction *I, MVT VT,
                                      unsigned SrcReg, bool IsSigned) {
  // Get a stack slot 8 bytes wide, aligned on an 8-byte boundary.
  // Note that if we have STFIWX available, we could use a 4-byte stack
  // slot for i32, but this being fast-isel we'll just go with the
  // easiest code gen possible.
  Address Addr;
  Addr.BaseType = Address::FrameIndexBase;
  Addr.Base.FI = MFI.CreateStackObject(8, 8, false);

  // Store the value from the FPR.
  if (!PPCEmitStore(MVT::f64, SrcReg, Addr))
    return 0;

  // Reload it into a GPR. If we want an i32 on big endian, modify the
  // address to have a 4-byte offset so we load from the right place.
  if (VT == MVT::i32)
    Addr.Offset = (PPCSubTarget->isLittleEndian()) ? 0 : 4;

  // Look at the currently assigned register for this instruction
  // to determine the required register class.
  unsigned AssignedReg = FuncInfo.ValueMap[I];
  const TargetRegisterClass *RC =
    AssignedReg ? MRI.getRegClass(AssignedReg) : nullptr;

  unsigned ResultReg = 0;
  if (!PPCEmitLoad(VT, ResultReg, Addr, RC, !IsSigned))
    return 0;

  return ResultReg;
}

// Attempt to fast-select a floating-point-to-integer conversion.
// FIXME: Once fast-isel has better support for VSX, conversions using
// direct moves should be implemented.
bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
  MVT DstVT, SrcVT;
  Type *DstTy = I->getType();
  if (!isTypeLegal(DstTy, DstVT))
    return false;

  if (DstVT != MVT::i32 && DstVT != MVT::i64)
    return false;

  // If we don't have FCTIDUZ, or SPE, and we need it, punt to SelectionDAG.
  if (DstVT == MVT::i64 && !IsSigned &&
      !PPCSubTarget->hasFPCVT() && !PPCSubTarget->hasSPE())
    return false;

  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();
  if (!isTypeLegal(SrcTy, SrcVT))
    return false;

  if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0)
    return false;

  // Convert f32 to f64 or convert VSSRC to VSFRC if necessary. This is just a
  // meaningless copy to get the register class right.
  const TargetRegisterClass *InRC = MRI.getRegClass(SrcReg);
  if (InRC == &PPC::F4RCRegClass)
    SrcReg = copyRegToRegClass(&PPC::F8RCRegClass, SrcReg);
  else if (InRC == &PPC::VSSRCRegClass)
    SrcReg = copyRegToRegClass(&PPC::VSFRCRegClass, SrcReg);

  // Determine the opcode for the conversion, which takes place
  // entirely within FPRs or VSRs.
  unsigned DestReg;
  unsigned Opc;
  auto RC = MRI.getRegClass(SrcReg);

  if (PPCSubTarget->hasSPE()) {
    DestReg = createResultReg(&PPC::GPRCRegClass);
    if (IsSigned)
      Opc = InRC == &PPC::SPE4RCRegClass ? PPC::EFSCTSIZ : PPC::EFDCTSIZ;
    else
      Opc = InRC == &PPC::SPE4RCRegClass ? PPC::EFSCTUIZ : PPC::EFDCTUIZ;
  } else if (isVSFRCRegClass(RC)) {
    DestReg = createResultReg(&PPC::VSFRCRegClass);
    if (DstVT == MVT::i32)
      Opc = IsSigned ? PPC::XSCVDPSXWS : PPC::XSCVDPUXWS;
    else
      Opc = IsSigned ? PPC::XSCVDPSXDS : PPC::XSCVDPUXDS;
  } else {
    DestReg = createResultReg(&PPC::F8RCRegClass);
    if (DstVT == MVT::i32)
      if (IsSigned)
        Opc = PPC::FCTIWZ;
      else
        Opc = PPCSubTarget->hasFPCVT() ? PPC::FCTIWUZ : PPC::FCTIDZ;
    else
      Opc = IsSigned ? PPC::FCTIDZ : PPC::FCTIDUZ;
  }

  // Generate the convert.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
    .addReg(SrcReg);

  // Now move the integer value from a float register to an integer register.
  unsigned IntReg = PPCSubTarget->hasSPE() ? DestReg :
    PPCMoveToIntReg(I, DstVT, DestReg, IsSigned);

  if (IntReg == 0)
    return false;

  updateValueMap(I, IntReg);
  return true;
}

// Attempt to fast-select a binary integer operation that isn't already
// handled automatically.
bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8)
    return false;

  // Look at the currently assigned register for this instruction
  // to determine the required register class. If there is no register,
  // make a conservative choice (don't assign R0).
  unsigned AssignedReg = FuncInfo.ValueMap[I];
  const TargetRegisterClass *RC =
    (AssignedReg ? MRI.getRegClass(AssignedReg) :
     &PPC::GPRC_and_GPRC_NOR0RegClass);
  bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass);

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = IsGPRC ? PPC::ADD4 : PPC::ADD8;
      break;
    case ISD::OR:
      Opc = IsGPRC ? PPC::OR : PPC::OR8;
      break;
    case ISD::SUB:
      Opc = IsGPRC ? PPC::SUBF : PPC::SUBF8;
      break;
  }

  unsigned ResultReg = createResultReg(RC ? RC : &PPC::G8RCRegClass);
  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // Handle case of small immediate operand.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(1))) {
    const APInt &CIVal = ConstInt->getValue();
    int Imm = (int)CIVal.getSExtValue();
    bool UseImm = true;
    if (isInt<16>(Imm)) {
      switch (Opc) {
        default:
          llvm_unreachable("Missing case!");
        case PPC::ADD4:
          Opc = PPC::ADDI;
          MRI.setRegClass(SrcReg1, &PPC::GPRC_and_GPRC_NOR0RegClass);
          break;
        case PPC::ADD8:
          Opc = PPC::ADDI8;
          MRI.setRegClass(SrcReg1, &PPC::G8RC_and_G8RC_NOX0RegClass);
          break;
        case PPC::OR:
          Opc = PPC::ORI;
          break;
        case PPC::OR8:
          Opc = PPC::ORI8;
          break;
        case PPC::SUBF:
          if (Imm == -32768)
            UseImm = false;
          else {
            Opc = PPC::ADDI;
            MRI.setRegClass(SrcReg1, &PPC::GPRC_and_GPRC_NOR0RegClass);
            Imm = -Imm;
          }
          break;
        case PPC::SUBF8:
          if (Imm == -32768)
            UseImm = false;
          else {
            Opc = PPC::ADDI8;
            MRI.setRegClass(SrcReg1, &PPC::G8RC_and_G8RC_NOX0RegClass);
            Imm = -Imm;
          }
          break;
      }

      if (UseImm) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                ResultReg)
            .addReg(SrcReg1)
            .addImm(Imm);
        updateValueMap(I, ResultReg);
        return true;
      }
    }
  }

  // Reg-reg case.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  // Reverse operands for subtract-from.
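  // (subf RT, RA, RB computes RB - RA, so the first source operand is the
  // subtrahend.)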
  if (ISDOpcode == ISD::SUB)
    std::swap(SrcReg1, SrcReg2);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
    .addReg(SrcReg1).addReg(SrcReg2);
  updateValueMap(I, ResultReg);
  return true;
}

// Handle arguments to a call that we're attempting to fast-select.
// Return false if the arguments are too complex for us at the moment.
bool PPCFastISel::processCallArgs(SmallVectorImpl<Value *> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes,
                                  bool IsVarArg) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, *Context);

  // Reserve space for the linkage area on the stack.
  unsigned LinkageSize = PPCSubTarget->getFrameLowering()->getLinkageSize();
  CCInfo.AllocateStack(LinkageSize, 8);

  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_PPC64_ELF_FIS);

  // Bail out if we can't handle any of the arguments.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // Skip vector arguments for now, as well as long double and
    // uint128_t, and anything that isn't passed in a register.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64 || ArgVT == MVT::i1 ||
        !VA.isRegLoc() || VA.needsCustom())
      return false;

    // Skip bit-converted arguments for now.
    if (VA.getLocInfo() == CCValAssign::BCvt)
      return false;
  }

  // Get a count of how many bytes are to be pushed onto the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if it is varargs.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed. As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area.
  NumBytes = std::max(NumBytes, LinkageSize + 64);
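  // (64 bytes = 8 GPR argument registers x 8 bytes each.)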

  // Issue CALLSEQ_START.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(TII.getCallFrameSetupOpcode()))
    .addImm(NumBytes).addImm(0);

  // Prepare to assign register arguments. Every argument uses up a
  // GPR protocol register even if it's passed in a floating-point
  // register (unless we're using the fast calling convention).
  unsigned NextGPR = PPC::X3;
  unsigned NextFPR = PPC::F1;
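  // (The 64-bit ELF ABI passes the first eight integer arguments in X3..X10
  // and the first thirteen floating-point arguments in F1..F13.)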

  // Process arguments.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // Handle argument promotion and bitcasts.
    switch (VA.getLocInfo()) {
      default:
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        const TargetRegisterClass *RC =
          (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
        unsigned TmpReg = createResultReg(RC);
        if (!PPCEmitIntExt(ArgVT, Arg, DestVT, TmpReg, /*IsZExt*/false))
          llvm_unreachable("Failed to emit a sext!");
        ArgVT = DestVT;
        Arg = TmpReg;
        break;
      }
      case CCValAssign::AExt:
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        const TargetRegisterClass *RC =
          (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
        unsigned TmpReg = createResultReg(RC);
        if (!PPCEmitIntExt(ArgVT, Arg, DestVT, TmpReg, /*IsZExt*/true))
          llvm_unreachable("Failed to emit a zext!");
        ArgVT = DestVT;
        Arg = TmpReg;
        break;
      }
      case CCValAssign::BCvt: {
        // FIXME: Not yet handled.
        llvm_unreachable("Should have bailed before getting here!");
        break;
      }
    }

    // Copy this argument to the appropriate register.
    unsigned ArgReg;
    if (ArgVT == MVT::f32 || ArgVT == MVT::f64) {
      ArgReg = NextFPR++;
      if (CC != CallingConv::Fast)
        ++NextGPR;
    } else
      ArgReg = NextGPR++;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ArgReg).addReg(Arg);
    RegArgs.push_back(ArgReg);
  }

  return true;
}

// For a call that we've determined we can fast-select, finish the
// call sequence and generate a copy to obtain the return value (if any).
bool PPCFastISel::finishCall(MVT RetVT, CallLoweringInfo &CLI, unsigned &NumBytes) {
  CallingConv::ID CC = CLI.CallConv;

  // Issue CallSEQ_END.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(TII.getCallFrameDestroyOpcode()))
    .addImm(NumBytes).addImm(0);

  // Next, generate a copy to obtain the return value.
  // FIXME: No multi-register return values yet, though I don't foresee
  // any real difficulties there.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, RetCC_PPC64_ELF_FIS);
    CCValAssign &VA = RVLocs[0];
    assert(RVLocs.size() == 1 && "No support for multi-reg return values!");
    assert(VA.isRegLoc() && "Can only return in registers!");

    MVT DestVT = VA.getValVT();
    MVT CopyVT = DestVT;

    // Ints smaller than a register still arrive in a full 64-bit
    // register, so make sure we recognize this.
    if (RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32)
      CopyVT = MVT::i64;

    unsigned SourcePhysReg = VA.getLocReg();
    unsigned ResultReg = 0;

    if (RetVT == CopyVT) {
      const TargetRegisterClass *CpyRC = TLI.getRegClassFor(CopyVT);
      ResultReg = copyRegToRegClass(CpyRC, SourcePhysReg);

    // If necessary, round the floating result to single precision.
    } else if (CopyVT == MVT::f64) {
      ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::FRSP),
              ResultReg).addReg(SourcePhysReg);

    // If only the low half of a general register is needed, generate
    // a GPRC copy instead of a G8RC copy. (EXTRACT_SUBREG can't be
    // used along the fast-isel path (not lowered), and downstream logic
    // also doesn't like a direct subreg copy on a physical reg.)
    } else if (RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32) {
      // Convert physical register from G8RC to GPRC.
      SourcePhysReg -= PPC::X0 - PPC::R0;
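      // (This relies on X0..X31 and R0..R31 being laid out at a constant
      // distance in the generated register enum, so Xn maps to Rn.)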
      ResultReg = copyRegToRegClass(&PPC::GPRCRegClass, SourcePhysReg);
    }

    assert(ResultReg && "ResultReg unset!");
    CLI.InRegs.push_back(SourcePhysReg);
    CLI.ResultReg = ResultReg;
    CLI.NumResultRegs = 1;
  }

  return true;
}

bool PPCFastISel::fastLowerCall(CallLoweringInfo &CLI) {
  CallingConv::ID CC  = CLI.CallConv;
  bool IsTailCall     = CLI.IsTailCall;
  bool IsVarArg       = CLI.IsVarArg;
  const Value *Callee = CLI.Callee;
  const MCSymbol *Symbol = CLI.Symbol;

  if (!Callee && !Symbol)
    return false;

  // Allow SelectionDAG isel to handle tail calls.
  if (IsTailCall)
    return false;

  // Let SDISel handle vararg functions.
  if (IsVarArg)
    return false;

  // Handle simple calls for now, with legal return types and
  // those that can be extended.
  Type *RetTy = CLI.RetTy;
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8)
    return false;
  else if (RetVT == MVT::i1 && PPCSubTarget->useCRBits())
    // We can't handle boolean returns when CR bits are in use.
    return false;

  // FIXME: No multi-register return values yet.
  if (RetVT != MVT::isVoid && RetVT != MVT::i8 && RetVT != MVT::i16 &&
      RetVT != MVT::i32 && RetVT != MVT::i64 && RetVT != MVT::f32 &&
      RetVT != MVT::f64) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, RetCC_PPC64_ELF_FIS);
    if (RVLocs.size() > 1)
      return false;
  }

  // Bail early if more than 8 arguments, as we only currently
  // handle arguments passed in registers.
  unsigned NumArgs = CLI.OutVals.size();
  if (NumArgs > 8)
    return false;

  // Set up the argument vectors.
  SmallVector<Value *, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;

  Args.reserve(NumArgs);
  ArgRegs.reserve(NumArgs);
  ArgVTs.reserve(NumArgs);
  ArgFlags.reserve(NumArgs);

  for (unsigned i = 0, ie = NumArgs; i != ie; ++i) {
    // Only handle easy calls for now. It would be reasonably easy
    // to handle <= 8-byte structures passed ByVal in registers, but we
    // have to ensure they are right-justified in the register.
    ISD::ArgFlagsTy Flags = CLI.OutFlags[i];
    if (Flags.isInReg() || Flags.isSRet() || Flags.isNest() || Flags.isByVal())
      return false;

    Value *ArgValue = CLI.OutVals[i];
    Type *ArgTy = ArgValue->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8)
      return false;

    if (ArgVT.isVector())
      return false;

    unsigned Arg = getRegForValue(ArgValue);
    if (Arg == 0)
      return false;

    Args.push_back(ArgValue);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Process the arguments.
  SmallVector<unsigned, 8> RegArgs;
  unsigned NumBytes;

  if (!processCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, IsVarArg))
    return false;

  MachineInstrBuilder MIB;
  // FIXME: No handling for function pointers yet. This requires
  // implementing the function descriptor (OPD) setup.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV) {
    // patchpoints are a special case; they always dispatch to a pointer value.
    // However, we don't actually want to generate the indirect call sequence
    // here (that will be generated, as necessary, during asm printing), and
    // the call we generate here will be erased by FastISel::selectPatchpoint,
    // so don't try very hard...
    if (CLI.IsPatchPoint)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::NOP));
    else
      return false;
  } else {
    // Build direct call with NOP for TOC restore.
    // FIXME: We can and should optimize away the NOP for local calls.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                  TII.get(PPC::BL8_NOP));
    // Add callee.
    MIB.addGlobalAddress(GV);
  }

  // Add implicit physical register uses to the call.
  for (unsigned II = 0, IE = RegArgs.size(); II != IE; ++II)
    MIB.addReg(RegArgs[II], RegState::Implicit);

  // Direct calls, in both the ELF V1 and V2 ABIs, need the TOC register live
  // into the call.
  PPCFuncInfo->setUsesTOCBasePtr();
  MIB.addReg(PPC::X2, RegState::Implicit);

  // Add a register mask with the call-preserved registers. Proper
  // defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  CLI.Call = MIB;

  // Finish off the call including any return values.
  return finishCall(RetVT, CLI, NumBytes);
}

// Attempt to fast-select a return instruction.
bool PPCFastISel::SelectRet(const Instruction *I) {

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (TLI.supportSplitCSR(FuncInfo.MF))
    return false;

  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;
  CallingConv::ID CC = F.getCallingConv();

  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, *Context);
    CCInfo.AnalyzeReturn(Outs, RetCC_PPC64_ELF_FIS);
    const Value *RV = Ret->getOperand(0);

    // FIXME: Only one output register for now.
    if (ValLocs.size() > 1)
      return false;

    // Special case for returning a constant integer of any size - materialize
    // the constant as an i64 and copy it to the return register.
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(RV)) {
      CCValAssign &VA = ValLocs[0];

      unsigned RetReg = VA.getLocReg();
      // We still need to worry about properly extending the sign. For example,
      // we could have only a single bit or a constant that needs zero
      // extension rather than sign extension. Make sure we pass the return
      // value extension property to integer materialization.
      unsigned SrcReg =
        PPCMaterializeInt(CI, MVT::i64, VA.getLocInfo() != CCValAssign::ZExt);

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), RetReg).addReg(SrcReg);

      RetRegs.push_back(RetReg);

    } else {
      unsigned Reg = getRegForValue(RV);

      if (Reg == 0)
        return false;

      // Copy the result values into the output registers.
      for (unsigned i = 0; i < ValLocs.size(); ++i) {

        CCValAssign &VA = ValLocs[i];
        assert(VA.isRegLoc() && "Can only return in registers!");
        RetRegs.push_back(VA.getLocReg());
        unsigned SrcReg = Reg + VA.getValNo();

        EVT RVEVT = TLI.getValueType(DL, RV->getType());
        if (!RVEVT.isSimple())
          return false;
        MVT RVVT = RVEVT.getSimpleVT();
        MVT DestVT = VA.getLocVT();

        if (RVVT != DestVT && RVVT != MVT::i8 &&
            RVVT != MVT::i16 && RVVT != MVT::i32)
          return false;

        if (RVVT != DestVT) {
          switch (VA.getLocInfo()) {
            default:
              llvm_unreachable("Unknown loc info!");
            case CCValAssign::Full:
              llvm_unreachable("Full value assign but types don't match?");
            case CCValAssign::AExt:
            case CCValAssign::ZExt: {
              const TargetRegisterClass *RC =
                (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
              unsigned TmpReg = createResultReg(RC);
              if (!PPCEmitIntExt(RVVT, SrcReg, DestVT, TmpReg, true))
                return false;
              SrcReg = TmpReg;
              break;
            }
            case CCValAssign::SExt: {
              const TargetRegisterClass *RC =
                (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
              unsigned TmpReg = createResultReg(RC);
              if (!PPCEmitIntExt(RVVT, SrcReg, DestVT, TmpReg, false))
                return false;
              SrcReg = TmpReg;
              break;
            }
          }
        }

        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::COPY), RetRegs[i])
          .addReg(SrcReg);
      }
    }
  }

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(PPC::BLR8));

  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);

  return true;
}

// Attempt to emit an integer extend of SrcReg into DestReg. Both
// signed and zero extensions are supported. Return false if we
// can't handle it.
bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                unsigned DestReg, bool IsZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i64)
    return false;
  if (SrcVT != MVT::i8 && SrcVT != MVT::i16 && SrcVT != MVT::i32)
    return false;

  // Signed extensions use EXTSB, EXTSH, EXTSW.
  if (!IsZExt) {
    unsigned Opc;
    if (SrcVT == MVT::i8)
      Opc = (DestVT == MVT::i32) ? PPC::EXTSB : PPC::EXTSB8_32_64;
    else if (SrcVT == MVT::i16)
      Opc = (DestVT == MVT::i32) ? PPC::EXTSH : PPC::EXTSH8_32_64;
    else {
      assert(DestVT == MVT::i64 && "Signed extend from i32 to i32??");
      Opc = PPC::EXTSW_32_64;
    }
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
      .addReg(SrcReg);

  // Unsigned 32-bit extensions use RLWINM.
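  // For example, a zero extend from i8 keeps only bits MB=24 through ME=31
  // of the unrotated (SH=0) source: "rlwinm RT, RS, 0, 24, 31".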
1826 } else if (DestVT
== MVT::i32
) {
1828 if (SrcVT
== MVT::i8
)
1831 assert(SrcVT
== MVT::i16
&& "Unsigned extend from i32 to i32??");
1834 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(PPC::RLWINM
),
1836 .addReg(SrcReg
).addImm(/*SH=*/0).addImm(MB
).addImm(/*ME=*/31);
  // Unsigned 64-bit extensions use RLDICL (with a 32-bit source).
  } else {
    unsigned MB;
    if (SrcVT == MVT::i8)
      MB = 56;
    else if (SrcVT == MVT::i16)
      MB = 48;
    else
      MB = 32;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(PPC::RLDICL_32_64), DestReg)
      .addReg(SrcReg).addImm(/*SH=*/0).addImm(MB);
  }

  return true;
}
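
// Worked examples (added for illustration) of the zero-extension encodings
// chosen above; rD/rS are placeholder registers:
//
//   zext i8  -> i32:  rlwinm rD, rS, 0, 24, 31   ; keep low 8 bits
//   zext i16 -> i32:  rlwinm rD, rS, 0, 16, 31   ; keep low 16 bits
//   zext i8  -> i64:  rldicl rD, rS, 0, 56       ; keep low 8 bits
//   zext i16 -> i64:  rldicl rD, rS, 0, 48       ; keep low 16 bits
//   zext i32 -> i64:  rldicl rD, rS, 0, 32       ; keep low 32 bits
//
// In each case the rotate amount is zero and the mask-begin field selects
// how many low-order bits survive.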
// Attempt to fast-select an indirect branch instruction.
bool PPCFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0)
    return false;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::MTCTR8))
    .addReg(AddrReg);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCTR8));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (const BasicBlock *SuccBB : IB->successors())
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]);

  return true;
}
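
// Added note: the lowering above produces a count-register branch, e.g. for
// a target address already in r3:
//
//   mtctr 3    ; MTCTR8 moves the target into CTR
//   bctr       ; BCTR8 branches through CTR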
// Attempt to fast-select an integer truncate instruction.
bool PPCFastISel::SelectTrunc(const Instruction *I) {
  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16)
    return false;

  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg)
    return false;

  // The only interesting case is when we need to switch register classes.
  if (SrcVT == MVT::i64)
    SrcReg = copyRegToRegClass(&PPC::GPRCRegClass, SrcReg, 0, PPC::sub_32);

  updateValueMap(I, SrcReg);
  return true;
}
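
// Added note: truncation emits no instruction; the low-order bits are
// already correct in the source register. Only `trunc i64 ...` needs any
// work, and that is just a COPY of the sub_32 subregister so the value
// lands in a 32-bit register class.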
// Attempt to fast-select an integer extend instruction.
bool PPCFastISel::SelectIntExt(const Instruction *I) {
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool IsZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  EVT SrcEVT, DestEVT;
  SrcEVT = TLI.getValueType(DL, SrcTy, true);
  DestEVT = TLI.getValueType(DL, DestTy, true);
  if (!SrcEVT.isSimple())
    return false;
  if (!DestEVT.isSimple())
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();

  // If we know the register class needed for the result of this
  // instruction, use it. Otherwise pick the register class of the
  // correct size that does not contain X0/R0, since we don't know
  // whether downstream uses permit that assignment.
  unsigned AssignedReg = FuncInfo.ValueMap[I];
  const TargetRegisterClass *RC =
    (AssignedReg ? MRI.getRegClass(AssignedReg) :
     (DestVT == MVT::i64 ? &PPC::G8RC_and_G8RC_NOX0RegClass :
      &PPC::GPRC_and_GPRC_NOR0RegClass));
  unsigned ResultReg = createResultReg(RC);

  if (!PPCEmitIntExt(SrcVT, SrcReg, DestVT, ResultReg, IsZExt))
    return false;

  updateValueMap(I, ResultReg);
  return true;
}
// Attempt to fast-select an instruction that wasn't handled by
// the table-generated machinery.
bool PPCFastISel::fastSelectInstruction(const Instruction *I) {

  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*IsSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*IsSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*IsSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*IsSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::Call:
      // On AIX, call lowering uses the DAG-ISEL path currently so that the
      // callee of the direct function call instruction will be mapped to the
      // symbol for the function's entry point, which is distinct from the
      // function descriptor symbol. The latter is the symbol whose XCOFF
      // symbol name is the C-linkage name of the source level function.
      if (TM.getTargetTriple().isOSAIX())
        break;
      return selectCall(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    // Here add other flavors of Instruction::XXX that automated
    // cases don't catch. For example, switches are terminators
    // that aren't yet handled.
    default:
      break;
  }
  return false;
}
// Materialize a floating-point constant into a register, and return
// the register number (or zero if we failed to handle it).
unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
  // No plans to handle long double here.
  if (VT != MVT::f32 && VT != MVT::f64)
    return 0;

  // All FP constants are loaded from the constant pool.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  assert(Align > 0 && "Unexpectedly missing alignment information!");
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  const bool HasSPE = PPCSubTarget->hasSPE();
  const TargetRegisterClass *RC;
  if (HasSPE)
    RC = ((VT == MVT::f32) ? &PPC::SPE4RCRegClass : &PPC::SPERCRegClass);
  else
    RC = ((VT == MVT::f32) ? &PPC::F4RCRegClass : &PPC::F8RCRegClass);

  unsigned DestReg = createResultReg(RC);
  CodeModel::Model CModel = TM.getCodeModel();

  MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
      MachinePointerInfo::getConstantPool(*FuncInfo.MF),
      MachineMemOperand::MOLoad, (VT == MVT::f32) ? 4 : 8, Align);

  unsigned Opc;
  if (HasSPE)
    Opc = ((VT == MVT::f32) ? PPC::SPELWZ : PPC::EVLDD);
  else
    Opc = ((VT == MVT::f32) ? PPC::LFS : PPC::LFD);
  unsigned TmpReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);

  PPCFuncInfo->setUsesTOCBasePtr();
  // For small code model, generate a LF[SD](0, LDtocCPT(Idx, X2)).
  if (CModel == CodeModel::Small) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocCPT),
            TmpReg)
      .addConstantPoolIndex(Idx).addReg(PPC::X2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
      .addImm(0).addReg(TmpReg).addMemOperand(MMO);
  } else {
    // Otherwise we generate LF[SD](Idx[lo], ADDIStocHA(X2, Idx)).
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA),
            TmpReg).addReg(PPC::X2).addConstantPoolIndex(Idx);
    // But for large code model, we must generate a LDtocL followed
    // by the load from the constant pool.
    if (CModel == CodeModel::Large) {
      unsigned TmpReg2 = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocL),
              TmpReg2).addConstantPoolIndex(Idx).addReg(TmpReg);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
          .addImm(0)
          .addReg(TmpReg2)
          .addMemOperand(MMO);
    } else
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx, 0, PPCII::MO_TOC_LO)
          .addReg(TmpReg)
          .addMemOperand(MMO);
  }

  return DestReg;
}
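
// Added sketch, not from the original source, of the sequences emitted
// above for an f64 constant at constant-pool index Idx (register names are
// placeholders; the SPE path uses evldd/spelwz instead of lfd/lfs):
//
//   Small code model:
//     ld    rT, Idx@toc(r2)       ; LDtocCPT
//     lfd   fD, 0(rT)
//   Medium (default) code model:
//     addis rT, r2, Idx@toc@ha    ; ADDIStocHA
//     lfd   fD, Idx@toc@l(rT)
//   Large code model:
//     addis rT, r2, Idx@toc@ha    ; ADDIStocHA
//     ld    rT2, Idx@toc@l(rT)    ; LDtocL
//     lfd   fD, 0(rT2)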
// Materialize the address of a global value into a register, and return
// the register number (or zero if we failed to handle it).
unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
  assert(VT == MVT::i64 && "Non-address!");
  const TargetRegisterClass *RC = &PPC::G8RC_and_G8RC_NOX0RegClass;
  unsigned DestReg = createResultReg(RC);

  // Global values may be plain old object addresses, TLS object
  // addresses, constant pool entries, or jump tables. How we generate
  // code for these may depend on small, medium, or large code model.
  CodeModel::Model CModel = TM.getCodeModel();

  // FIXME: Jump tables are not yet required because fast-isel doesn't
  // handle switches; if that changes, we need them as well. For now,
  // what follows assumes everything's a generic (or TLS) global address.

  // FIXME: We don't yet handle the complexity of TLS.
  if (GV->isThreadLocal())
    return 0;
  PPCFuncInfo->setUsesTOCBasePtr();
  // For small code model, generate a simple TOC load.
  if (CModel == CodeModel::Small)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtoc),
            DestReg)
        .addGlobalAddress(GV)
        .addReg(PPC::X2);
  else {
    // If the address is an externally defined symbol, a symbol with common
    // or externally available linkage, a non-local function address, or a
    // jump table address (not yet needed), or if we are generating code
    // for large code model, we generate:
    //       LDtocL(GV, ADDIStocHA(%x2, GV))
    // Otherwise we generate:
    //       ADDItocL(ADDIStocHA(%x2, GV), GV)
    // Either way, start with the ADDIStocHA:
    unsigned HighPartReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA),
            HighPartReg).addReg(PPC::X2).addGlobalAddress(GV);

    unsigned char GVFlags = PPCSubTarget->classifyGlobalReference(GV);
    if (GVFlags & PPCII::MO_NLP_FLAG) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocL),
              DestReg).addGlobalAddress(GV).addReg(HighPartReg);
    } else {
      // Otherwise generate the ADDItocL.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDItocL),
              DestReg).addReg(HighPartReg).addGlobalAddress(GV);
    }
  }

  return DestReg;
}
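
// Added sketch, not from the original source, of the sequences above for a
// global @g (register names are placeholders):
//
//   Small code model:
//     ld    rD, g@toc(r2)         ; LDtoc
//   Medium model, locally defined symbol:
//     addis rH, r2, g@toc@ha      ; ADDIStocHA
//     addi  rD, rH, g@toc@l       ; ADDItocL
//   Non-local-pointer symbol (MO_NLP_FLAG) or large code model:
//     addis rH, r2, g@toc@ha      ; ADDIStocHA
//     ld    rD, g@toc@l(rH)       ; LDtocL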
// Materialize a 32-bit integer constant into a register, and return
// the register number (or zero if we failed to handle it).
unsigned PPCFastISel::PPCMaterialize32BitInt(int64_t Imm,
                                             const TargetRegisterClass *RC) {
  unsigned Lo = Imm & 0xFFFF;
  unsigned Hi = (Imm >> 16) & 0xFFFF;

  unsigned ResultReg = createResultReg(RC);
  bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass);

  if (isInt<16>(Imm))
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(IsGPRC ? PPC::LI : PPC::LI8), ResultReg)
      .addImm(Imm);
  else if (Lo) {
    // Both Lo and Hi have nonzero bits.
    unsigned TmpReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), TmpReg)
      .addImm(Hi);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(IsGPRC ? PPC::ORI : PPC::ORI8), ResultReg)
      .addReg(TmpReg).addImm(Lo);
  } else
    // Only the high bits are nonzero.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), ResultReg)
      .addImm(Hi);

  return ResultReg;
}
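
// Worked example (added for illustration): 0x12345678 has nonzero bits in
// both halves, so it takes the LIS+ORI path:
//
//   lis rD, 0x1234      ; rD = 0x12340000
//   ori rD, rD, 0x5678  ; rD = 0x12345678
//
// A 16-bit signed immediate such as -42 is a single `li rD, -42`, and
// 0x7FFF0000, whose low half is zero, is a single `lis rD, 0x7FFF`.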
// Materialize a 64-bit integer constant into a register, and return
// the register number (or zero if we failed to handle it).
unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
                                             const TargetRegisterClass *RC) {
  unsigned Remainder = 0;
  unsigned Shift = 0;

  // If the value doesn't fit in 32 bits, see if we can shift it
  // so that it fits in 32 bits.
  if (!isInt<32>(Imm)) {
    Shift = countTrailingZeros<uint64_t>(Imm);
    int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;

    if (isInt<32>(ImmSh))
      Imm = ImmSh;
    else {
      Remainder = Imm;
      Shift = 32;
      Imm >>= 32;
    }
  }

  // Handle the high-order 32 bits (if shifted) or the whole 32 bits
  // (if not shifted).
  unsigned TmpReg1 = PPCMaterialize32BitInt(Imm, RC);
  if (!TmpReg1)
    return 0;

  // If upper 32 bits were not zero, we've built them and need to shift
  // them into place.
  unsigned TmpReg2;
  if (Imm) {
    TmpReg2 = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::RLDICR),
            TmpReg2).addReg(TmpReg1).addImm(Shift).addImm(63 - Shift);
  } else
    TmpReg2 = TmpReg1;

  unsigned TmpReg3, Hi, Lo;
  if ((Hi = (Remainder >> 16) & 0xFFFF)) {
    TmpReg3 = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ORIS8),
            TmpReg3).addReg(TmpReg2).addImm(Hi);
  } else
    TmpReg3 = TmpReg2;

  if ((Lo = Remainder & 0xFFFF)) {
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ORI8),
            ResultReg).addReg(TmpReg3).addImm(Lo);
    return ResultReg;
  }

  return TmpReg3;
}
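
// Worked example (added for illustration): 0x123456789ABCDEF0 still has
// more than 32 significant bits after shifting out its trailing zeros, so
// the low word 0x9ABCDEF0 goes to Remainder, Shift becomes 32, and we emit:
//
//   lis    rD, 0x1234        ; high word via PPCMaterialize32BitInt
//   ori    rD, rD, 0x5678
//   rldicr rD, rD, 32, 31    ; shift the high word into place
//   oris   rD, rD, 0x9ABC    ; OR in the remainder, upper half
//   ori    rD, rD, 0xDEF0    ; ... and lower half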
// Materialize an integer constant into a register, and return
// the register number (or zero if we failed to handle it).
unsigned PPCFastISel::PPCMaterializeInt(const ConstantInt *CI, MVT VT,
                                        bool UseSExt) {
  // If we're using CR bit registers for i1 values, handle that as a special
  // case first.
  if (VT == MVT::i1 && PPCSubTarget->useCRBits()) {
    unsigned ImmReg = createResultReg(&PPC::CRBITRCRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(CI->isZero() ? PPC::CRUNSET : PPC::CRSET), ImmReg);
    return ImmReg;
  }

  if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 &&
      VT != MVT::i1)
    return 0;

  const TargetRegisterClass *RC =
      ((VT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass);
  int64_t Imm = UseSExt ? CI->getSExtValue() : CI->getZExtValue();

  // If the constant is in range, use a load-immediate.
  // Since LI will sign extend the constant we need to make sure that for
  // our zeroext constants that the sign extended constant fits into 16-bits -
  // a range of 0..0x7fff.
  if (isInt<16>(Imm)) {
    unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI;
    unsigned ImmReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ImmReg)
      .addImm(Imm);
    return ImmReg;
  }

  // Construct the constant piecewise.
  if (VT == MVT::i64)
    return PPCMaterialize64BitInt(Imm, RC);
  else if (VT == MVT::i32)
    return PPCMaterialize32BitInt(Imm, RC);

  return 0;
}
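
// Added note: the isInt<16> guard interacts with UseSExt. An i32 constant
// 0xFFFF read zero-extended is 65535, which fails isInt<16> and is built
// piecewise, since `li` would sign-extend the 0xFFFF pattern to -1. Read
// sign-extended, i32 -1 passes the check and becomes a single `li rD, -1`.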
// Materialize a constant into a register, and return the register
// number (or zero if we failed to handle it).
unsigned PPCFastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return PPCMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return PPCMaterializeGV(GV, VT);
  else if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
    // Note that the code in FunctionLoweringInfo::ComputePHILiveOutRegInfo
    // assumes that constant PHI operands will be zero extended, and failure to
    // match that assumption will cause problems if we sign extend here but
    // some user of a PHI is in a block for which we fall back to full SDAG
    // instruction selection.
    return PPCMaterializeInt(CI, VT, false);

  return 0;
}
// Materialize the address created by an alloca into a register, and
// return the register number (or zero if we failed to handle it).
unsigned PPCFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8),
            ResultReg).addFrameIndex(SI->second).addImm(0);
    return ResultReg;
  }

  return 0;
}
// Fold loads into extends when possible.
// FIXME: We can have multiple redundant extend/trunc instructions
// following a load. The folding only picks up one. Extend this
// to check subsequent instructions for the same pattern and remove
// them. Thus ResultReg should be the def reg for the last redundant
// instruction in a chain, and all intervening instructions can be
// removed from parent. Change test/CodeGen/PowerPC/fast-isel-fold.ll
// to add ELF64-NOT: rldicl to the appropriate tests when this works.
bool PPCFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  bool IsZExt = false;
  switch (MI->getOpcode()) {
    default:
      return false;

    case PPC::RLDICL:
    case PPC::RLDICL_32_64: {
      IsZExt = true;
      unsigned MB = MI->getOperand(3).getImm();
      if ((VT == MVT::i8 && MB <= 56) ||
          (VT == MVT::i16 && MB <= 48) ||
          (VT == MVT::i32 && MB <= 32))
        break;
      return false;
    }

    case PPC::RLWINM:
    case PPC::RLWINM8: {
      IsZExt = true;
      unsigned MB = MI->getOperand(3).getImm();
      if ((VT == MVT::i8 && MB <= 24) ||
          (VT == MVT::i16 && MB <= 16))
        break;
      return false;
    }

    case PPC::EXTSB:
    case PPC::EXTSB8:
    case PPC::EXTSB8_32_64:
      /* There is no sign-extending load-byte instruction. */
      return false;

    case PPC::EXTSH:
    case PPC::EXTSH8:
    case PPC::EXTSH8_32_64: {
      if (VT != MVT::i16 && VT != MVT::i8)
        return false;
      break;
    }

    case PPC::EXTSW:
    case PPC::EXTSW_32:
    case PPC::EXTSW_32_64: {
      if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8)
        return false;
      break;
    }
  }
  // See if we can handle this address.
  Address Addr;
  if (!PPCComputeAddress(LI->getOperand(0), Addr))
    return false;

  unsigned ResultReg = MI->getOperand(0).getReg();

  if (!PPCEmitLoad(VT, ResultReg, Addr, nullptr, IsZExt,
                   PPCSubTarget->hasSPE() ? PPC::EVLDD : PPC::LFD))
    return false;

  MachineBasicBlock::iterator I(MI);
  removeDeadCode(I, std::next(I));
  return true;
}
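
// Added example, not from the original source: for
//
//   %v = load i16, i16* %p
//   %e = sext i16 %v to i32    ; selected as EXTSH
//
// the fold replaces the original load and the EXTSH with one sign-extending
// lha load. Zero-extending folds work because lbz/lhz/lwz already zero-fill
// the high bits; there is no sign-extending byte load on PowerPC, which is
// why the EXTSB cases above bail out.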
// Attempt to lower call arguments in a faster way than done by
// the selection DAG code.
bool PPCFastISel::fastLowerArguments() {
  // Defer to normal argument lowering for now. It's reasonably
  // efficient. Consider doing something like ARM to handle the
  // case where all args fit in registers, no varargs, no float
  // or vector args.
  return false;
}
// Handle materializing integer constants into a register. This is not
// automatically generated for PowerPC, so must be explicitly created here.
unsigned PPCFastISel::fastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
  if (Opc != ISD::Constant)
    return 0;

  // If we're using CR bit registers for i1 values, handle that as a special
  // case first.
  if (VT == MVT::i1 && PPCSubTarget->useCRBits()) {
    unsigned ImmReg = createResultReg(&PPC::CRBITRCRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(Imm == 0 ? PPC::CRUNSET : PPC::CRSET), ImmReg);
    return ImmReg;
  }

  if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 &&
      VT != MVT::i1)
    return 0;

  const TargetRegisterClass *RC = ((VT == MVT::i64) ? &PPC::G8RCRegClass :
                                   &PPC::GPRCRegClass);
  if (VT == MVT::i64)
    return PPCMaterialize64BitInt(Imm, RC);
  else
    return PPCMaterialize32BitInt(Imm, RC);
}
// Override for ADDI and ADDI8 to set the correct register class
// on RHS operand 0. The automatic infrastructure naively assumes
// GPRC for i32 and G8RC for i64; the concept of "no R0" is lost
// for these cases. At the moment, none of the other automatically
// generated RI instructions require special treatment. However, once
// SelectSelect is implemented, "isel" requires similar handling.
//
// Also be conservative about the output register class. Avoid
// assigning R0 or X0 to the output register for GPRC and G8RC
// register classes, as any such result could be used in ADDI, etc.,
// where those regs have another meaning.
unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  if (MachineInstOpcode == PPC::ADDI)
    MRI.setRegClass(Op0, &PPC::GPRC_and_GPRC_NOR0RegClass);
  else if (MachineInstOpcode == PPC::ADDI8)
    MRI.setRegClass(Op0, &PPC::G8RC_and_G8RC_NOX0RegClass);

  const TargetRegisterClass *UseRC =
    (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
     (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));

  return FastISel::fastEmitInst_ri(MachineInstOpcode, UseRC,
                                   Op0, Op0IsKill, Imm);
}
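
// Background note (added): in D-form instructions such as
// `addi rD, rA, imm`, an rA field of 0 is read as the literal value zero,
// not the contents of r0 - `addi 3, 0, 5` loads 5 rather than adding 5 to
// r0. The NOR0/NOX0 classes used here and below keep the register allocator
// from assigning R0/X0 where it would change the instruction's meaning.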
// Override for instructions with one register operand to avoid use of
// R0/X0. The automatic infrastructure isn't aware of the context so
// we must be conservative.
unsigned PPCFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  const TargetRegisterClass *UseRC =
    (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
     (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));

  return FastISel::fastEmitInst_r(MachineInstOpcode, UseRC, Op0, Op0IsKill);
}
// Override for instructions with two register operands to avoid use
// of R0/X0. The automatic infrastructure isn't aware of the context
// so we must be conservative.
unsigned PPCFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  const TargetRegisterClass *UseRC =
    (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
     (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));

  return FastISel::fastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op0IsKill,
                                   Op1, Op1IsKill);
}
// Create the fast instruction selector for PowerPC64 ELF.
namespace llvm {
  FastISel *PPC::createFastISel(FunctionLoweringInfo &FuncInfo,
                                const TargetLibraryInfo *LibInfo) {
    // Only available on 64-bit ELF for now.
    const PPCSubtarget &Subtarget = FuncInfo.MF->getSubtarget<PPCSubtarget>();
    if (Subtarget.isPPC64() && Subtarget.isSVR4ABI())
      return new PPCFastISel(FuncInfo, LibInfo);
    return nullptr;
  }
}