//===- AArch64FastISel.cpp - AArch64 FastISel implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the AArch64-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// AArch64GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//
16 #include "AArch64CallingConvention.h"
17 #include "AArch64RegisterInfo.h"
18 #include "AArch64Subtarget.h"
19 #include "MCTargetDesc/AArch64AddressingModes.h"
20 #include "Utils/AArch64BaseInfo.h"
21 #include "llvm/ADT/APFloat.h"
22 #include "llvm/ADT/APInt.h"
23 #include "llvm/ADT/DenseMap.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/Analysis/BranchProbabilityInfo.h"
26 #include "llvm/CodeGen/CallingConvLower.h"
27 #include "llvm/CodeGen/FastISel.h"
28 #include "llvm/CodeGen/FunctionLoweringInfo.h"
29 #include "llvm/CodeGen/ISDOpcodes.h"
30 #include "llvm/CodeGen/MachineBasicBlock.h"
31 #include "llvm/CodeGen/MachineConstantPool.h"
32 #include "llvm/CodeGen/MachineFrameInfo.h"
33 #include "llvm/CodeGen/MachineInstr.h"
34 #include "llvm/CodeGen/MachineInstrBuilder.h"
35 #include "llvm/CodeGen/MachineMemOperand.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/RuntimeLibcalls.h"
38 #include "llvm/CodeGen/ValueTypes.h"
39 #include "llvm/IR/Argument.h"
40 #include "llvm/IR/Attributes.h"
41 #include "llvm/IR/BasicBlock.h"
42 #include "llvm/IR/CallingConv.h"
43 #include "llvm/IR/Constant.h"
44 #include "llvm/IR/Constants.h"
45 #include "llvm/IR/DataLayout.h"
46 #include "llvm/IR/DerivedTypes.h"
47 #include "llvm/IR/Function.h"
48 #include "llvm/IR/GetElementPtrTypeIterator.h"
49 #include "llvm/IR/GlobalValue.h"
50 #include "llvm/IR/InstrTypes.h"
51 #include "llvm/IR/Instruction.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/Intrinsics.h"
55 #include "llvm/IR/Operator.h"
56 #include "llvm/IR/Type.h"
57 #include "llvm/IR/User.h"
58 #include "llvm/IR/Value.h"
59 #include "llvm/MC/MCInstrDesc.h"
60 #include "llvm/MC/MCRegisterInfo.h"
61 #include "llvm/MC/MCSymbol.h"
62 #include "llvm/Support/AtomicOrdering.h"
63 #include "llvm/Support/Casting.h"
64 #include "llvm/Support/CodeGen.h"
65 #include "llvm/Support/Compiler.h"
66 #include "llvm/Support/ErrorHandling.h"
67 #include "llvm/Support/MachineValueType.h"
68 #include "llvm/Support/MathExtras.h"
using namespace llvm;

namespace {

class AArch64FastISel final : public FastISel {
  class Address {
  public:
    using BaseKind = enum {
      RegBase,
      FrameIndexBase
    };

  private:
    BaseKind Kind = RegBase;
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::InvalidShiftExtend;
    union {
      unsigned Reg;
      int FI;
    } Base;
    unsigned OffsetReg = 0;
    unsigned Shift = 0;
    int64_t Offset = 0;
    const GlobalValue *GV = nullptr;

  public:
    Address() { Base.Reg = 0; }

    void setKind(BaseKind K) { Kind = K; }
    BaseKind getKind() const { return Kind; }
    void setExtendType(AArch64_AM::ShiftExtendType E) { ExtType = E; }
    AArch64_AM::ShiftExtendType getExtendType() const { return ExtType; }
    bool isRegBase() const { return Kind == RegBase; }
    bool isFIBase() const { return Kind == FrameIndexBase; }

    void setReg(unsigned Reg) {
      assert(isRegBase() && "Invalid base register access!");
      Base.Reg = Reg;
    }

    unsigned getReg() const {
      assert(isRegBase() && "Invalid base register access!");
      return Base.Reg;
    }

    void setOffsetReg(unsigned Reg) { OffsetReg = Reg; }
    unsigned getOffsetReg() const { return OffsetReg; }

    void setFI(unsigned FI) {
      assert(isFIBase() && "Invalid base frame index access!");
      Base.FI = FI;
    }

    unsigned getFI() const {
      assert(isFIBase() && "Invalid base frame index access!");
      return Base.FI;
    }

    void setOffset(int64_t O) { Offset = O; }
    int64_t getOffset() { return Offset; }
    void setShift(unsigned S) { Shift = S; }
    unsigned getShift() { return Shift; }

    void setGlobalValue(const GlobalValue *G) { GV = G; }
    const GlobalValue *getGlobalValue() { return GV; }
  };
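
  // An Address is either a (possibly extended and shifted) base register or a
  // frame index, plus an optional register offset and immediate offset. A
  // register base with a register offset maps onto the [Xn, Wm, {U,S}XTW #s]
  // style addressing modes, while a frame index plus immediate maps onto the
  // scaled unsigned-immediate forms once the frame index is resolved.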
  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;
  LLVMContext *Context;

  bool fastLowerArguments() override;
  bool fastLowerCall(CallLoweringInfo &CLI) override;
  bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
  // Selection routines.
  bool selectAddSub(const Instruction *I);
  bool selectLogicalOp(const Instruction *I);
  bool selectLoad(const Instruction *I);
  bool selectStore(const Instruction *I);
  bool selectBranch(const Instruction *I);
  bool selectIndirectBr(const Instruction *I);
  bool selectCmp(const Instruction *I);
  bool selectSelect(const Instruction *I);
  bool selectFPExt(const Instruction *I);
  bool selectFPTrunc(const Instruction *I);
  bool selectFPToInt(const Instruction *I, bool Signed);
  bool selectIntToFP(const Instruction *I, bool Signed);
  bool selectRem(const Instruction *I, unsigned ISDOpcode);
  bool selectRet(const Instruction *I);
  bool selectTrunc(const Instruction *I);
  bool selectIntExt(const Instruction *I);
  bool selectMul(const Instruction *I);
  bool selectShift(const Instruction *I);
  bool selectBitCast(const Instruction *I);
  bool selectFRem(const Instruction *I);
  bool selectSDiv(const Instruction *I);
  bool selectGetElementPtr(const Instruction *I);
  bool selectAtomicCmpXchg(const AtomicCmpXchgInst *I);
  // Utility helper routines.
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed = false);
  bool isValueAvailable(const Value *V) const;
  bool computeAddress(const Value *Obj, Address &Addr, Type *Ty = nullptr);
  bool computeCallAddress(const Value *V, Address &Addr);
  bool simplifyAddress(Address &Addr, MVT VT);
  void addLoadStoreOperands(Address &Addr, const MachineInstrBuilder &MIB,
                            MachineMemOperand::Flags Flags,
                            unsigned ScaleFactor, MachineMemOperand *MMO);
  bool isMemCpySmall(uint64_t Len, unsigned Alignment);
  bool tryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                          unsigned Alignment);
  bool foldXALUIntrinsic(AArch64CC::CondCode &CC, const Instruction *I,
                         const Value *Cond);
  bool optimizeIntExtLoad(const Instruction *I, MVT RetVT, MVT SrcVT);
  bool optimizeSelect(const SelectInst *SI);
  std::pair<unsigned, bool> getRegForGEPIndex(const Value *Idx);
  // Emit helper routines.
  unsigned emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
                      const Value *RHS, bool SetFlags = false,
                      bool WantResult = true, bool IsZExt = false);
  unsigned emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
                         bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
                         bool SetFlags = false, bool WantResult = true);
  unsigned emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
                         bool LHSIsKill, uint64_t Imm, bool SetFlags = false,
                         bool WantResult = true);
  unsigned emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
                         bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
                         AArch64_AM::ShiftExtendType ShiftType,
                         uint64_t ShiftImm, bool SetFlags = false,
                         bool WantResult = true);
  unsigned emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
                         bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
                         AArch64_AM::ShiftExtendType ExtType,
                         uint64_t ShiftImm, bool SetFlags = false,
                         bool WantResult = true);
  bool emitCompareAndBranch(const BranchInst *BI);
  bool emitCmp(const Value *LHS, const Value *RHS, bool IsZExt);
  bool emitICmp(MVT RetVT, const Value *LHS, const Value *RHS, bool IsZExt);
  bool emitICmp_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
  bool emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS);
  unsigned emitLoad(MVT VT, MVT ResultVT, Address Addr, bool WantZExt = true,
                    MachineMemOperand *MMO = nullptr);
  bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
                 MachineMemOperand *MMO = nullptr);
  bool emitStoreRelease(MVT VT, unsigned SrcReg, unsigned AddrReg,
                        MachineMemOperand *MMO = nullptr);
  unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
  unsigned emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt);
  unsigned emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
                   bool SetFlags = false, bool WantResult = true,
                   bool IsZExt = false);
  unsigned emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill, int64_t Imm);
  unsigned emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
                   bool SetFlags = false, bool WantResult = true,
                   bool IsZExt = false);
  unsigned emitSubs_rr(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
                       unsigned RHSReg, bool RHSIsKill, bool WantResult = true);
  unsigned emitSubs_rs(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
                       unsigned RHSReg, bool RHSIsKill,
                       AArch64_AM::ShiftExtendType ShiftType, uint64_t ShiftImm,
                       bool WantResult = true);
  unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
                         const Value *RHS);
  unsigned emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
                            bool LHSIsKill, uint64_t Imm);
  unsigned emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
                            bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
                            uint64_t ShiftImm);
  unsigned emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
  unsigned emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                      unsigned Op1, bool Op1IsKill);
  unsigned emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                        unsigned Op1, bool Op1IsKill);
  unsigned emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                        unsigned Op1, bool Op1IsKill);
  unsigned emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
                      unsigned Op1Reg, bool Op1IsKill);
  unsigned emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
                      uint64_t Imm, bool IsZExt = true);
  unsigned emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
                      unsigned Op1Reg, bool Op1IsKill);
  unsigned emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
                      uint64_t Imm, bool IsZExt = true);
  unsigned emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
                      unsigned Op1Reg, bool Op1IsKill);
  unsigned emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
                      uint64_t Imm, bool IsZExt = false);
  unsigned materializeInt(const ConstantInt *CI, MVT VT);
  unsigned materializeFP(const ConstantFP *CFP, MVT VT);
  unsigned materializeGV(const GlobalValue *GV);
  // Call handling routines.
private:
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
  bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
                       unsigned &NumBytes);
  bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);

public:
  // Backend specific FastISel code.
  unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
  unsigned fastMaterializeConstant(const Constant *C) override;
  unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;
  explicit AArch64FastISel(FunctionLoweringInfo &FuncInfo,
                           const TargetLibraryInfo *LibInfo)
      : FastISel(FuncInfo, LibInfo, /*SkipTargetIndependentISel=*/true) {
    Subtarget =
        &static_cast<const AArch64Subtarget &>(FuncInfo.MF->getSubtarget());
    Context = &FuncInfo.Fn->getContext();
  }

  bool fastSelectInstruction(const Instruction *I) override;

#include "AArch64GenFastISel.inc"
};

} // end anonymous namespace
/// Check if the sign-/zero-extend will be a noop.
static bool isIntExtFree(const Instruction *I) {
  assert((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
         "Unexpected integer extend instruction.");
  assert(!I->getType()->isVectorTy() && I->getType()->isIntegerTy() &&
         "Unexpected value type.");
  bool IsZExt = isa<ZExtInst>(I);

  if (const auto *LI = dyn_cast<LoadInst>(I->getOperand(0)))
    if (LI->hasOneUse())
      return true;

  if (const auto *Arg = dyn_cast<Argument>(I->getOperand(0)))
    if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr()))
      return true;

  return false;
}
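
// For example, an extend whose operand is a single-use load (the load itself
// can produce the widened value) or an argument that already carries the
// matching zeroext/signext attribute needs no extra UXT/SXT instruction.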
/// Determine the implicit scale factor that is applied by a memory
/// operation for a given value type.
static unsigned getImplicitScaleFactor(MVT VT) {
  switch (VT.SimpleTy) {
  default:
    return 0;    // invalid
  case MVT::i1:  // fall-through
  case MVT::i8:
    return 1;
  case MVT::i16:
    return 2;
  case MVT::i32: // fall-through
  case MVT::f32:
    return 4;
  case MVT::i64: // fall-through
  case MVT::f64:
    return 8;
  }
}
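
// For example, an i32 load has a scale factor of 4: the 12-bit unsigned
// immediate of the scaled load/store forms is measured in element-sized
// units, so [Xn, #imm] addresses Xn + imm * 4 bytes for a word access.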
CCAssignFn *AArch64FastISel::CCAssignFnForCall(CallingConv::ID CC) const {
  if (CC == CallingConv::WebKit_JS)
    return CC_AArch64_WebKit_JS;
  if (CC == CallingConv::GHC)
    return CC_AArch64_GHC;
  if (CC == CallingConv::CFGuard_Check)
    return CC_AArch64_Win64_CFGuard_Check;
  return Subtarget->isTargetDarwin() ? CC_AArch64_DarwinPCS : CC_AArch64_AAPCS;
}
unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i64 &&
         "Alloca should always return a pointer.");

  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI))
    return 0;

  DenseMap<const AllocaInst *, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
            ResultReg)
        .addFrameIndex(SI->second)
        .addImm(0)
        .addImm(0);
    return ResultReg;
  }

  return 0;
}
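
// Sketch of the expected output for a static alloca with frame index #1
// (virtual register name illustrative only):
//   %vreg = ADDXri <fi#1>, 0, 0
// The frame index is rewritten into an SP-relative offset later, during
// prologue/epilogue insertion.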
unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) {
  if (!CI->isZero())
    return fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());

  // Create a copy from the zero register to materialize a "0" value.
  const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass
                                                   : &AArch64::GPR32RegClass;
  unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
  unsigned ResultReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(ZeroReg, getKillRegState(true));
  return ResultReg;
}
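
// Zero is handled as a COPY from WZR/XZR rather than a MOV immediate: reading
// the zero register is free, and the copy can usually be folded away by later
// passes.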
unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
  // Positive zero (+0.0) has to be materialized with a fmov from the zero
  // register, because the immediate version of fmov cannot encode zero.
  if (CFP->isNullValue())
    return fastMaterializeFloatZero(CFP);

  if (VT != MVT::f32 && VT != MVT::f64)
    return 0;

  const APFloat Val = CFP->getValueAPF();
  bool Is64Bit = (VT == MVT::f64);
  // This checks to see if we can use FMOV instructions to materialize
  // a constant, otherwise we have to materialize via the constant pool.
  int Imm =
      Is64Bit ? AArch64_AM::getFP64Imm(Val) : AArch64_AM::getFP32Imm(Val);
  if (Imm != -1) {
    unsigned Opc = Is64Bit ? AArch64::FMOVDi : AArch64::FMOVSi;
    return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
  }

  // For the MachO large code model materialize the FP constant in code.
  if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
    unsigned Opc1 = Is64Bit ? AArch64::MOVi64imm : AArch64::MOVi32imm;
    const TargetRegisterClass *RC = Is64Bit ?
        &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

    unsigned TmpReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc1), TmpReg)
        .addImm(CFP->getValueAPF().bitcastToAPInt().getZExtValue());

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(TmpReg, getKillRegState(true));

    return ResultReg;
  }

  // Materialize via constant pool. MachineConstantPool wants an explicit
  // alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0)
    Align = DL.getTypeAllocSize(CFP->getType());

  unsigned CPI = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
          ADRPReg).addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGE);

  unsigned Opc = Is64Bit ? AArch64::LDRDui : AArch64::LDRSui;
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(ADRPReg)
      .addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  return ResultReg;
}
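
// The constant-pool path produces the usual small-code-model literal access,
// roughly (register names and relocation spelling illustrative only):
//   ADRP x8, <constant-pool-entry>  ; page of the literal
//   LDR  d0, [x8, <page-offset>]    ; low 12 bits folded into the load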
unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
  // We can't handle thread-local variables quickly yet.
  if (GV->isThreadLocal())
    return 0;

  // MachO still uses GOT for large code-model accesses, but ELF requires
  // movz/movk sequences, which FastISel doesn't handle yet.
  if (!Subtarget->useSmallAddressing() && !Subtarget->isTargetMachO())
    return 0;

  unsigned OpFlags = Subtarget->ClassifyGlobalReference(GV, TM);

  EVT DestEVT = TLI.getValueType(DL, GV->getType(), true);
  if (!DestEVT.isSimple())
    return 0;

  unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
  unsigned ResultReg;

  if (OpFlags & AArch64II::MO_GOT) {
    // ADRP + LDRX
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
            ADRPReg)
        .addGlobalAddress(GV, 0, AArch64II::MO_PAGE | OpFlags);

    unsigned LdrOpc;
    if (Subtarget->isTargetILP32()) {
      ResultReg = createResultReg(&AArch64::GPR32RegClass);
      LdrOpc = AArch64::LDRWui;
    } else {
      ResultReg = createResultReg(&AArch64::GPR64RegClass);
      LdrOpc = AArch64::LDRXui;
    }
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(LdrOpc),
            ResultReg)
        .addReg(ADRPReg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
                                     AArch64II::MO_NC | OpFlags);
    if (!Subtarget->isTargetILP32())
      return ResultReg;

    // LDRWui produces a 32-bit register, but pointers in-register are 64-bits
    // so we must extend the result on ILP32.
    unsigned Result64 = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::SUBREG_TO_REG))
        .addDef(Result64)
        .addImm(0)
        .addReg(ResultReg, RegState::Kill)
        .addImm(AArch64::sub_32);
    return Result64;
  } else {
    // ADRP + ADDX
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
            ADRPReg)
        .addGlobalAddress(GV, 0, AArch64II::MO_PAGE | OpFlags);

    ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
            ResultReg)
        .addReg(ADRPReg)
        .addGlobalAddress(GV, 0,
                          AArch64II::MO_PAGEOFF | AArch64II::MO_NC | OpFlags)
        .addImm(0);
    return ResultReg;
  }
}
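
// Sketch of the two resulting sequences (register names illustrative only):
//   GOT access:  ADRP x8, :got:sym   then  LDR x0, [x8, :got_lo12:sym]
//   direct:      ADRP x8, sym        then  ADD x0, x8, :lo12:sym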
unsigned AArch64FastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple())
    return 0;
  MVT VT = CEVT.getSimpleVT();
  // arm64_32 has 32-bit pointers held in 64-bit registers. Because of that,
  // 'null' pointers need to have a somewhat special treatment.
  if (const auto *CPN = dyn_cast<ConstantPointerNull>(C)) {
    (void)CPN;
    assert(CPN->getType()->getPointerAddressSpace() == 0 &&
           "Unexpected address space");
    assert(VT == MVT::i64 && "Expected 64-bit pointers");
    return materializeInt(ConstantInt::get(Type::getInt64Ty(*Context), 0), VT);
  }

  if (const auto *CI = dyn_cast<ConstantInt>(C))
    return materializeInt(CI, VT);
  else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return materializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return materializeGV(GV);

  return 0;
}
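
// Usage note (an assumption about the caller, mirroring the generic FastISel
// driver): fastMaterializeConstant() is invoked whenever an IR constant needs
// a register, and returning 0 makes the caller fall back to the default
// materialization path.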
unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP* CFP) {
  assert(CFP->isNullValue() &&
         "Floating-point constant is not a positive zero.");
  MVT VT;
  if (!isTypeLegal(CFP->getType(), VT))
    return 0;

  if (VT != MVT::f32 && VT != MVT::f64)
    return 0;

  bool Is64Bit = (VT == MVT::f64);
  unsigned ZReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
  unsigned Opc = Is64Bit ? AArch64::FMOVXDr : AArch64::FMOVWSr;
  return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
}
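
// FMOVWSr/FMOVXDr copy a GPR bit pattern into an FPR, so moving WZR/XZR in
// yields exactly +0.0 without touching the constant pool; -0.0 cannot be
// produced this way, hence the isNullValue() assertion above.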
/// Check if the multiply is by a power-of-2 constant.
static bool isMulPowOf2(const Value *I) {
  if (const auto *MI = dyn_cast<MulOperator>(I)) {
    if (const auto *C = dyn_cast<ConstantInt>(MI->getOperand(0)))
      if (C->getValue().isPowerOf2())
        return true;
    if (const auto *C = dyn_cast<ConstantInt>(MI->getOperand(1)))
      if (C->getValue().isPowerOf2())
        return true;
  }
  return false;
}
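
// computeAddress() below uses this so that "mul x, 2^k" can be folded into a
// scaled addressing mode exactly like "shl x, k".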
578 // Computes the address to get to an object.
579 bool AArch64FastISel::computeAddress(const Value
*Obj
, Address
&Addr
, Type
*Ty
)
581 const User
*U
= nullptr;
582 unsigned Opcode
= Instruction::UserOp1
;
583 if (const Instruction
*I
= dyn_cast
<Instruction
>(Obj
)) {
584 // Don't walk into other basic blocks unless the object is an alloca from
585 // another block, otherwise it may not have a virtual register assigned.
586 if (FuncInfo
.StaticAllocaMap
.count(static_cast<const AllocaInst
*>(Obj
)) ||
587 FuncInfo
.MBBMap
[I
->getParent()] == FuncInfo
.MBB
) {
588 Opcode
= I
->getOpcode();
591 } else if (const ConstantExpr
*C
= dyn_cast
<ConstantExpr
>(Obj
)) {
592 Opcode
= C
->getOpcode();
596 if (auto *Ty
= dyn_cast
<PointerType
>(Obj
->getType()))
597 if (Ty
->getAddressSpace() > 255)
598 // Fast instruction selection doesn't support the special
605 case Instruction::BitCast
:
606 // Look through bitcasts.
607 return computeAddress(U
->getOperand(0), Addr
, Ty
);
609 case Instruction::IntToPtr
:
610 // Look past no-op inttoptrs.
611 if (TLI
.getValueType(DL
, U
->getOperand(0)->getType()) ==
612 TLI
.getPointerTy(DL
))
613 return computeAddress(U
->getOperand(0), Addr
, Ty
);
616 case Instruction::PtrToInt
:
617 // Look past no-op ptrtoints.
618 if (TLI
.getValueType(DL
, U
->getType()) == TLI
.getPointerTy(DL
))
619 return computeAddress(U
->getOperand(0), Addr
, Ty
);
622 case Instruction::GetElementPtr
: {
623 Address SavedAddr
= Addr
;
624 uint64_t TmpOffset
= Addr
.getOffset();
626 // Iterate through the GEP folding the constants into offsets where
628 for (gep_type_iterator GTI
= gep_type_begin(U
), E
= gep_type_end(U
);
630 const Value
*Op
= GTI
.getOperand();
631 if (StructType
*STy
= GTI
.getStructTypeOrNull()) {
632 const StructLayout
*SL
= DL
.getStructLayout(STy
);
633 unsigned Idx
= cast
<ConstantInt
>(Op
)->getZExtValue();
634 TmpOffset
+= SL
->getElementOffset(Idx
);
636 uint64_t S
= DL
.getTypeAllocSize(GTI
.getIndexedType());
638 if (const ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op
)) {
639 // Constant-offset addressing.
640 TmpOffset
+= CI
->getSExtValue() * S
;
643 if (canFoldAddIntoGEP(U
, Op
)) {
644 // A compatible add with a constant operand. Fold the constant.
646 cast
<ConstantInt
>(cast
<AddOperator
>(Op
)->getOperand(1));
647 TmpOffset
+= CI
->getSExtValue() * S
;
648 // Iterate on the other operand.
649 Op
= cast
<AddOperator
>(Op
)->getOperand(0);
653 goto unsupported_gep
;
658 // Try to grab the base operand now.
659 Addr
.setOffset(TmpOffset
);
660 if (computeAddress(U
->getOperand(0), Addr
, Ty
))
663 // We failed, restore everything and try the other options.
669 case Instruction::Alloca
: {
670 const AllocaInst
*AI
= cast
<AllocaInst
>(Obj
);
671 DenseMap
<const AllocaInst
*, int>::iterator SI
=
672 FuncInfo
.StaticAllocaMap
.find(AI
);
673 if (SI
!= FuncInfo
.StaticAllocaMap
.end()) {
674 Addr
.setKind(Address::FrameIndexBase
);
675 Addr
.setFI(SI
->second
);
680 case Instruction::Add
: {
681 // Adds of constants are common and easy enough.
682 const Value
*LHS
= U
->getOperand(0);
683 const Value
*RHS
= U
->getOperand(1);
685 if (isa
<ConstantInt
>(LHS
))
688 if (const ConstantInt
*CI
= dyn_cast
<ConstantInt
>(RHS
)) {
689 Addr
.setOffset(Addr
.getOffset() + CI
->getSExtValue());
690 return computeAddress(LHS
, Addr
, Ty
);
693 Address Backup
= Addr
;
694 if (computeAddress(LHS
, Addr
, Ty
) && computeAddress(RHS
, Addr
, Ty
))
700 case Instruction::Sub
: {
701 // Subs of constants are common and easy enough.
702 const Value
*LHS
= U
->getOperand(0);
703 const Value
*RHS
= U
->getOperand(1);
705 if (const ConstantInt
*CI
= dyn_cast
<ConstantInt
>(RHS
)) {
706 Addr
.setOffset(Addr
.getOffset() - CI
->getSExtValue());
707 return computeAddress(LHS
, Addr
, Ty
);
711 case Instruction::Shl
: {
712 if (Addr
.getOffsetReg())
715 const auto *CI
= dyn_cast
<ConstantInt
>(U
->getOperand(1));
719 unsigned Val
= CI
->getZExtValue();
720 if (Val
< 1 || Val
> 3)
723 uint64_t NumBytes
= 0;
724 if (Ty
&& Ty
->isSized()) {
725 uint64_t NumBits
= DL
.getTypeSizeInBits(Ty
);
726 NumBytes
= NumBits
/ 8;
727 if (!isPowerOf2_64(NumBits
))
731 if (NumBytes
!= (1ULL << Val
))
735 Addr
.setExtendType(AArch64_AM::LSL
);
737 const Value
*Src
= U
->getOperand(0);
738 if (const auto *I
= dyn_cast
<Instruction
>(Src
)) {
739 if (FuncInfo
.MBBMap
[I
->getParent()] == FuncInfo
.MBB
) {
740 // Fold the zext or sext when it won't become a noop.
741 if (const auto *ZE
= dyn_cast
<ZExtInst
>(I
)) {
742 if (!isIntExtFree(ZE
) &&
743 ZE
->getOperand(0)->getType()->isIntegerTy(32)) {
744 Addr
.setExtendType(AArch64_AM::UXTW
);
745 Src
= ZE
->getOperand(0);
747 } else if (const auto *SE
= dyn_cast
<SExtInst
>(I
)) {
748 if (!isIntExtFree(SE
) &&
749 SE
->getOperand(0)->getType()->isIntegerTy(32)) {
750 Addr
.setExtendType(AArch64_AM::SXTW
);
751 Src
= SE
->getOperand(0);
757 if (const auto *AI
= dyn_cast
<BinaryOperator
>(Src
))
758 if (AI
->getOpcode() == Instruction::And
) {
759 const Value
*LHS
= AI
->getOperand(0);
760 const Value
*RHS
= AI
->getOperand(1);
762 if (const auto *C
= dyn_cast
<ConstantInt
>(LHS
))
763 if (C
->getValue() == 0xffffffff)
766 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
767 if (C
->getValue() == 0xffffffff) {
768 Addr
.setExtendType(AArch64_AM::UXTW
);
769 unsigned Reg
= getRegForValue(LHS
);
772 bool RegIsKill
= hasTrivialKill(LHS
);
773 Reg
= fastEmitInst_extractsubreg(MVT::i32
, Reg
, RegIsKill
,
775 Addr
.setOffsetReg(Reg
);
780 unsigned Reg
= getRegForValue(Src
);
783 Addr
.setOffsetReg(Reg
);
786 case Instruction::Mul
: {
787 if (Addr
.getOffsetReg())
793 const Value
*LHS
= U
->getOperand(0);
794 const Value
*RHS
= U
->getOperand(1);
796 // Canonicalize power-of-2 value to the RHS.
797 if (const auto *C
= dyn_cast
<ConstantInt
>(LHS
))
798 if (C
->getValue().isPowerOf2())
801 assert(isa
<ConstantInt
>(RHS
) && "Expected an ConstantInt.");
802 const auto *C
= cast
<ConstantInt
>(RHS
);
803 unsigned Val
= C
->getValue().logBase2();
804 if (Val
< 1 || Val
> 3)
807 uint64_t NumBytes
= 0;
808 if (Ty
&& Ty
->isSized()) {
809 uint64_t NumBits
= DL
.getTypeSizeInBits(Ty
);
810 NumBytes
= NumBits
/ 8;
811 if (!isPowerOf2_64(NumBits
))
815 if (NumBytes
!= (1ULL << Val
))
819 Addr
.setExtendType(AArch64_AM::LSL
);
821 const Value
*Src
= LHS
;
822 if (const auto *I
= dyn_cast
<Instruction
>(Src
)) {
823 if (FuncInfo
.MBBMap
[I
->getParent()] == FuncInfo
.MBB
) {
824 // Fold the zext or sext when it won't become a noop.
825 if (const auto *ZE
= dyn_cast
<ZExtInst
>(I
)) {
826 if (!isIntExtFree(ZE
) &&
827 ZE
->getOperand(0)->getType()->isIntegerTy(32)) {
828 Addr
.setExtendType(AArch64_AM::UXTW
);
829 Src
= ZE
->getOperand(0);
831 } else if (const auto *SE
= dyn_cast
<SExtInst
>(I
)) {
832 if (!isIntExtFree(SE
) &&
833 SE
->getOperand(0)->getType()->isIntegerTy(32)) {
834 Addr
.setExtendType(AArch64_AM::SXTW
);
835 Src
= SE
->getOperand(0);
841 unsigned Reg
= getRegForValue(Src
);
844 Addr
.setOffsetReg(Reg
);
847 case Instruction::And
: {
848 if (Addr
.getOffsetReg())
851 if (!Ty
|| DL
.getTypeSizeInBits(Ty
) != 8)
854 const Value
*LHS
= U
->getOperand(0);
855 const Value
*RHS
= U
->getOperand(1);
857 if (const auto *C
= dyn_cast
<ConstantInt
>(LHS
))
858 if (C
->getValue() == 0xffffffff)
861 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
862 if (C
->getValue() == 0xffffffff) {
864 Addr
.setExtendType(AArch64_AM::LSL
);
865 Addr
.setExtendType(AArch64_AM::UXTW
);
867 unsigned Reg
= getRegForValue(LHS
);
870 bool RegIsKill
= hasTrivialKill(LHS
);
871 Reg
= fastEmitInst_extractsubreg(MVT::i32
, Reg
, RegIsKill
,
873 Addr
.setOffsetReg(Reg
);
878 case Instruction::SExt
:
879 case Instruction::ZExt
: {
880 if (!Addr
.getReg() || Addr
.getOffsetReg())
883 const Value
*Src
= nullptr;
884 // Fold the zext or sext when it won't become a noop.
885 if (const auto *ZE
= dyn_cast
<ZExtInst
>(U
)) {
886 if (!isIntExtFree(ZE
) && ZE
->getOperand(0)->getType()->isIntegerTy(32)) {
887 Addr
.setExtendType(AArch64_AM::UXTW
);
888 Src
= ZE
->getOperand(0);
890 } else if (const auto *SE
= dyn_cast
<SExtInst
>(U
)) {
891 if (!isIntExtFree(SE
) && SE
->getOperand(0)->getType()->isIntegerTy(32)) {
892 Addr
.setExtendType(AArch64_AM::SXTW
);
893 Src
= SE
->getOperand(0);
901 unsigned Reg
= getRegForValue(Src
);
904 Addr
.setOffsetReg(Reg
);
909 if (Addr
.isRegBase() && !Addr
.getReg()) {
910 unsigned Reg
= getRegForValue(Obj
);
917 if (!Addr
.getOffsetReg()) {
918 unsigned Reg
= getRegForValue(Obj
);
921 Addr
.setOffsetReg(Reg
);
928 bool AArch64FastISel::computeCallAddress(const Value
*V
, Address
&Addr
) {
929 const User
*U
= nullptr;
930 unsigned Opcode
= Instruction::UserOp1
;
933 if (const auto *I
= dyn_cast
<Instruction
>(V
)) {
934 Opcode
= I
->getOpcode();
936 InMBB
= I
->getParent() == FuncInfo
.MBB
->getBasicBlock();
937 } else if (const auto *C
= dyn_cast
<ConstantExpr
>(V
)) {
938 Opcode
= C
->getOpcode();
944 case Instruction::BitCast
:
945 // Look past bitcasts if its operand is in the same BB.
947 return computeCallAddress(U
->getOperand(0), Addr
);
949 case Instruction::IntToPtr
:
950 // Look past no-op inttoptrs if its operand is in the same BB.
952 TLI
.getValueType(DL
, U
->getOperand(0)->getType()) ==
953 TLI
.getPointerTy(DL
))
954 return computeCallAddress(U
->getOperand(0), Addr
);
956 case Instruction::PtrToInt
:
957 // Look past no-op ptrtoints if its operand is in the same BB.
958 if (InMBB
&& TLI
.getValueType(DL
, U
->getType()) == TLI
.getPointerTy(DL
))
959 return computeCallAddress(U
->getOperand(0), Addr
);
963 if (const GlobalValue
*GV
= dyn_cast
<GlobalValue
>(V
)) {
964 Addr
.setGlobalValue(GV
);
968 // If all else fails, try to materialize the value in a register.
969 if (!Addr
.getGlobalValue()) {
970 Addr
.setReg(getRegForValue(V
));
971 return Addr
.getReg() != 0;
977 bool AArch64FastISel::isTypeLegal(Type
*Ty
, MVT
&VT
) {
978 EVT evt
= TLI
.getValueType(DL
, Ty
, true);
980 if (Subtarget
->isTargetILP32() && Ty
->isPointerTy())
983 // Only handle simple types.
984 if (evt
== MVT::Other
|| !evt
.isSimple())
986 VT
= evt
.getSimpleVT();
988 // This is a legal type, but it's not something we handle in fast-isel.
992 // Handle all other legal types, i.e. a register that will directly hold this
994 return TLI
.isTypeLegal(VT
);
997 /// Determine if the value type is supported by FastISel.
999 /// FastISel for AArch64 can handle more value types than are legal. This adds
1000 /// simple value type such as i1, i8, and i16.
1001 bool AArch64FastISel::isTypeSupported(Type
*Ty
, MVT
&VT
, bool IsVectorAllowed
) {
1002 if (Ty
->isVectorTy() && !IsVectorAllowed
)
1005 if (isTypeLegal(Ty
, VT
))
1008 // If this is a type than can be sign or zero-extended to a basic operation
1009 // go ahead and accept it now.
1010 if (VT
== MVT::i1
|| VT
== MVT::i8
|| VT
== MVT::i16
)
1016 bool AArch64FastISel::isValueAvailable(const Value
*V
) const {
1017 if (!isa
<Instruction
>(V
))
1020 const auto *I
= cast
<Instruction
>(V
);
1021 return FuncInfo
.MBBMap
[I
->getParent()] == FuncInfo
.MBB
;
1024 bool AArch64FastISel::simplifyAddress(Address
&Addr
, MVT VT
) {
1025 if (Subtarget
->isTargetILP32())
1028 unsigned ScaleFactor
= getImplicitScaleFactor(VT
);
1032 bool ImmediateOffsetNeedsLowering
= false;
1033 bool RegisterOffsetNeedsLowering
= false;
1034 int64_t Offset
= Addr
.getOffset();
1035 if (((Offset
< 0) || (Offset
& (ScaleFactor
- 1))) && !isInt
<9>(Offset
))
1036 ImmediateOffsetNeedsLowering
= true;
1037 else if (Offset
> 0 && !(Offset
& (ScaleFactor
- 1)) &&
1038 !isUInt
<12>(Offset
/ ScaleFactor
))
1039 ImmediateOffsetNeedsLowering
= true;
1041 // Cannot encode an offset register and an immediate offset in the same
1042 // instruction. Fold the immediate offset into the load/store instruction and
1043 // emit an additional add to take care of the offset register.
1044 if (!ImmediateOffsetNeedsLowering
&& Addr
.getOffset() && Addr
.getOffsetReg())
1045 RegisterOffsetNeedsLowering
= true;
1047 // Cannot encode zero register as base.
1048 if (Addr
.isRegBase() && Addr
.getOffsetReg() && !Addr
.getReg())
1049 RegisterOffsetNeedsLowering
= true;
1051 // If this is a stack pointer and the offset needs to be simplified then put
1052 // the alloca address into a register, set the base type back to register and
1053 // continue. This should almost never happen.
1054 if ((ImmediateOffsetNeedsLowering
|| Addr
.getOffsetReg()) && Addr
.isFIBase())
1056 unsigned ResultReg
= createResultReg(&AArch64::GPR64spRegClass
);
1057 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AArch64::ADDXri
),
1059 .addFrameIndex(Addr
.getFI())
1062 Addr
.setKind(Address::RegBase
);
1063 Addr
.setReg(ResultReg
);
1066 if (RegisterOffsetNeedsLowering
) {
1067 unsigned ResultReg
= 0;
1068 if (Addr
.getReg()) {
1069 if (Addr
.getExtendType() == AArch64_AM::SXTW
||
1070 Addr
.getExtendType() == AArch64_AM::UXTW
)
1071 ResultReg
= emitAddSub_rx(/*UseAdd=*/true, MVT::i64
, Addr
.getReg(),
1072 /*TODO:IsKill=*/false, Addr
.getOffsetReg(),
1073 /*TODO:IsKill=*/false, Addr
.getExtendType(),
1076 ResultReg
= emitAddSub_rs(/*UseAdd=*/true, MVT::i64
, Addr
.getReg(),
1077 /*TODO:IsKill=*/false, Addr
.getOffsetReg(),
1078 /*TODO:IsKill=*/false, AArch64_AM::LSL
,
1081 if (Addr
.getExtendType() == AArch64_AM::UXTW
)
1082 ResultReg
= emitLSL_ri(MVT::i64
, MVT::i32
, Addr
.getOffsetReg(),
1083 /*Op0IsKill=*/false, Addr
.getShift(),
1085 else if (Addr
.getExtendType() == AArch64_AM::SXTW
)
1086 ResultReg
= emitLSL_ri(MVT::i64
, MVT::i32
, Addr
.getOffsetReg(),
1087 /*Op0IsKill=*/false, Addr
.getShift(),
1090 ResultReg
= emitLSL_ri(MVT::i64
, MVT::i64
, Addr
.getOffsetReg(),
1091 /*Op0IsKill=*/false, Addr
.getShift());
1096 Addr
.setReg(ResultReg
);
1097 Addr
.setOffsetReg(0);
1099 Addr
.setExtendType(AArch64_AM::InvalidShiftExtend
);
1102 // Since the offset is too large for the load/store instruction get the
1103 // reg+offset into a register.
1104 if (ImmediateOffsetNeedsLowering
) {
1107 // Try to fold the immediate into the add instruction.
1108 ResultReg
= emitAdd_ri_(MVT::i64
, Addr
.getReg(), /*IsKill=*/false, Offset
);
1110 ResultReg
= fastEmit_i(MVT::i64
, MVT::i64
, ISD::Constant
, Offset
);
1114 Addr
.setReg(ResultReg
);
1120 void AArch64FastISel::addLoadStoreOperands(Address
&Addr
,
1121 const MachineInstrBuilder
&MIB
,
1122 MachineMemOperand::Flags Flags
,
1123 unsigned ScaleFactor
,
1124 MachineMemOperand
*MMO
) {
1125 int64_t Offset
= Addr
.getOffset() / ScaleFactor
;
1126 // Frame base works a bit differently. Handle it separately.
1127 if (Addr
.isFIBase()) {
1128 int FI
= Addr
.getFI();
1129 // FIXME: We shouldn't be using getObjectSize/getObjectAlignment. The size
1130 // and alignment should be based on the VT.
1131 MMO
= FuncInfo
.MF
->getMachineMemOperand(
1132 MachinePointerInfo::getFixedStack(*FuncInfo
.MF
, FI
, Offset
), Flags
,
1133 MFI
.getObjectSize(FI
), MFI
.getObjectAlignment(FI
));
1134 // Now add the rest of the operands.
1135 MIB
.addFrameIndex(FI
).addImm(Offset
);
1137 assert(Addr
.isRegBase() && "Unexpected address kind.");
1138 const MCInstrDesc
&II
= MIB
->getDesc();
1139 unsigned Idx
= (Flags
& MachineMemOperand::MOStore
) ? 1 : 0;
1141 constrainOperandRegClass(II
, Addr
.getReg(), II
.getNumDefs()+Idx
));
1143 constrainOperandRegClass(II
, Addr
.getOffsetReg(), II
.getNumDefs()+Idx
+1));
1144 if (Addr
.getOffsetReg()) {
1145 assert(Addr
.getOffset() == 0 && "Unexpected offset");
1146 bool IsSigned
= Addr
.getExtendType() == AArch64_AM::SXTW
||
1147 Addr
.getExtendType() == AArch64_AM::SXTX
;
1148 MIB
.addReg(Addr
.getReg());
1149 MIB
.addReg(Addr
.getOffsetReg());
1150 MIB
.addImm(IsSigned
);
1151 MIB
.addImm(Addr
.getShift() != 0);
1153 MIB
.addReg(Addr
.getReg()).addImm(Offset
);
1157 MIB
.addMemOperand(MMO
);
1160 unsigned AArch64FastISel::emitAddSub(bool UseAdd
, MVT RetVT
, const Value
*LHS
,
1161 const Value
*RHS
, bool SetFlags
,
1162 bool WantResult
, bool IsZExt
) {
1163 AArch64_AM::ShiftExtendType ExtendType
= AArch64_AM::InvalidShiftExtend
;
1164 bool NeedExtend
= false;
1165 switch (RetVT
.SimpleTy
) {
1173 ExtendType
= IsZExt
? AArch64_AM::UXTB
: AArch64_AM::SXTB
;
1177 ExtendType
= IsZExt
? AArch64_AM::UXTH
: AArch64_AM::SXTH
;
1179 case MVT::i32
: // fall-through
1184 RetVT
.SimpleTy
= std::max(RetVT
.SimpleTy
, MVT::i32
);
1186 // Canonicalize immediates to the RHS first.
1187 if (UseAdd
&& isa
<Constant
>(LHS
) && !isa
<Constant
>(RHS
))
1188 std::swap(LHS
, RHS
);
1190 // Canonicalize mul by power of 2 to the RHS.
1191 if (UseAdd
&& LHS
->hasOneUse() && isValueAvailable(LHS
))
1192 if (isMulPowOf2(LHS
))
1193 std::swap(LHS
, RHS
);
1195 // Canonicalize shift immediate to the RHS.
1196 if (UseAdd
&& LHS
->hasOneUse() && isValueAvailable(LHS
))
1197 if (const auto *SI
= dyn_cast
<BinaryOperator
>(LHS
))
1198 if (isa
<ConstantInt
>(SI
->getOperand(1)))
1199 if (SI
->getOpcode() == Instruction::Shl
||
1200 SI
->getOpcode() == Instruction::LShr
||
1201 SI
->getOpcode() == Instruction::AShr
)
1202 std::swap(LHS
, RHS
);
1204 unsigned LHSReg
= getRegForValue(LHS
);
1207 bool LHSIsKill
= hasTrivialKill(LHS
);
1210 LHSReg
= emitIntExt(SrcVT
, LHSReg
, RetVT
, IsZExt
);
1212 unsigned ResultReg
= 0;
1213 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
)) {
1214 uint64_t Imm
= IsZExt
? C
->getZExtValue() : C
->getSExtValue();
1215 if (C
->isNegative())
1216 ResultReg
= emitAddSub_ri(!UseAdd
, RetVT
, LHSReg
, LHSIsKill
, -Imm
,
1217 SetFlags
, WantResult
);
1219 ResultReg
= emitAddSub_ri(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, Imm
, SetFlags
,
1221 } else if (const auto *C
= dyn_cast
<Constant
>(RHS
))
1222 if (C
->isNullValue())
1223 ResultReg
= emitAddSub_ri(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, 0, SetFlags
,
1229 // Only extend the RHS within the instruction if there is a valid extend type.
1230 if (ExtendType
!= AArch64_AM::InvalidShiftExtend
&& RHS
->hasOneUse() &&
1231 isValueAvailable(RHS
)) {
1232 if (const auto *SI
= dyn_cast
<BinaryOperator
>(RHS
))
1233 if (const auto *C
= dyn_cast
<ConstantInt
>(SI
->getOperand(1)))
1234 if ((SI
->getOpcode() == Instruction::Shl
) && (C
->getZExtValue() < 4)) {
1235 unsigned RHSReg
= getRegForValue(SI
->getOperand(0));
1238 bool RHSIsKill
= hasTrivialKill(SI
->getOperand(0));
1239 return emitAddSub_rx(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1240 RHSIsKill
, ExtendType
, C
->getZExtValue(),
1241 SetFlags
, WantResult
);
1243 unsigned RHSReg
= getRegForValue(RHS
);
1246 bool RHSIsKill
= hasTrivialKill(RHS
);
1247 return emitAddSub_rx(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
, RHSIsKill
,
1248 ExtendType
, 0, SetFlags
, WantResult
);
1251 // Check if the mul can be folded into the instruction.
1252 if (RHS
->hasOneUse() && isValueAvailable(RHS
)) {
1253 if (isMulPowOf2(RHS
)) {
1254 const Value
*MulLHS
= cast
<MulOperator
>(RHS
)->getOperand(0);
1255 const Value
*MulRHS
= cast
<MulOperator
>(RHS
)->getOperand(1);
1257 if (const auto *C
= dyn_cast
<ConstantInt
>(MulLHS
))
1258 if (C
->getValue().isPowerOf2())
1259 std::swap(MulLHS
, MulRHS
);
1261 assert(isa
<ConstantInt
>(MulRHS
) && "Expected a ConstantInt.");
1262 uint64_t ShiftVal
= cast
<ConstantInt
>(MulRHS
)->getValue().logBase2();
1263 unsigned RHSReg
= getRegForValue(MulLHS
);
1266 bool RHSIsKill
= hasTrivialKill(MulLHS
);
1267 ResultReg
= emitAddSub_rs(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1268 RHSIsKill
, AArch64_AM::LSL
, ShiftVal
, SetFlags
,
1275 // Check if the shift can be folded into the instruction.
1276 if (RHS
->hasOneUse() && isValueAvailable(RHS
)) {
1277 if (const auto *SI
= dyn_cast
<BinaryOperator
>(RHS
)) {
1278 if (const auto *C
= dyn_cast
<ConstantInt
>(SI
->getOperand(1))) {
1279 AArch64_AM::ShiftExtendType ShiftType
= AArch64_AM::InvalidShiftExtend
;
1280 switch (SI
->getOpcode()) {
1282 case Instruction::Shl
: ShiftType
= AArch64_AM::LSL
; break;
1283 case Instruction::LShr
: ShiftType
= AArch64_AM::LSR
; break;
1284 case Instruction::AShr
: ShiftType
= AArch64_AM::ASR
; break;
1286 uint64_t ShiftVal
= C
->getZExtValue();
1287 if (ShiftType
!= AArch64_AM::InvalidShiftExtend
) {
1288 unsigned RHSReg
= getRegForValue(SI
->getOperand(0));
1291 bool RHSIsKill
= hasTrivialKill(SI
->getOperand(0));
1292 ResultReg
= emitAddSub_rs(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1293 RHSIsKill
, ShiftType
, ShiftVal
, SetFlags
,
1302 unsigned RHSReg
= getRegForValue(RHS
);
1305 bool RHSIsKill
= hasTrivialKill(RHS
);
1308 RHSReg
= emitIntExt(SrcVT
, RHSReg
, RetVT
, IsZExt
);
1310 return emitAddSub_rr(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
, RHSIsKill
,
1311 SetFlags
, WantResult
);
1314 unsigned AArch64FastISel::emitAddSub_rr(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
1315 bool LHSIsKill
, unsigned RHSReg
,
1316 bool RHSIsKill
, bool SetFlags
,
1318 assert(LHSReg
&& RHSReg
&& "Invalid register number.");
1320 if (LHSReg
== AArch64::SP
|| LHSReg
== AArch64::WSP
||
1321 RHSReg
== AArch64::SP
|| RHSReg
== AArch64::WSP
)
1324 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
1327 static const unsigned OpcTable
[2][2][2] = {
1328 { { AArch64::SUBWrr
, AArch64::SUBXrr
},
1329 { AArch64::ADDWrr
, AArch64::ADDXrr
} },
1330 { { AArch64::SUBSWrr
, AArch64::SUBSXrr
},
1331 { AArch64::ADDSWrr
, AArch64::ADDSXrr
} }
1333 bool Is64Bit
= RetVT
== MVT::i64
;
1334 unsigned Opc
= OpcTable
[SetFlags
][UseAdd
][Is64Bit
];
1335 const TargetRegisterClass
*RC
=
1336 Is64Bit
? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1339 ResultReg
= createResultReg(RC
);
1341 ResultReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
1343 const MCInstrDesc
&II
= TII
.get(Opc
);
1344 LHSReg
= constrainOperandRegClass(II
, LHSReg
, II
.getNumDefs());
1345 RHSReg
= constrainOperandRegClass(II
, RHSReg
, II
.getNumDefs() + 1);
1346 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, II
, ResultReg
)
1347 .addReg(LHSReg
, getKillRegState(LHSIsKill
))
1348 .addReg(RHSReg
, getKillRegState(RHSIsKill
));
1352 unsigned AArch64FastISel::emitAddSub_ri(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
1353 bool LHSIsKill
, uint64_t Imm
,
1354 bool SetFlags
, bool WantResult
) {
1355 assert(LHSReg
&& "Invalid register number.");
1357 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
1361 if (isUInt
<12>(Imm
))
1363 else if ((Imm
& 0xfff000) == Imm
) {
1369 static const unsigned OpcTable
[2][2][2] = {
1370 { { AArch64::SUBWri
, AArch64::SUBXri
},
1371 { AArch64::ADDWri
, AArch64::ADDXri
} },
1372 { { AArch64::SUBSWri
, AArch64::SUBSXri
},
1373 { AArch64::ADDSWri
, AArch64::ADDSXri
} }
1375 bool Is64Bit
= RetVT
== MVT::i64
;
1376 unsigned Opc
= OpcTable
[SetFlags
][UseAdd
][Is64Bit
];
1377 const TargetRegisterClass
*RC
;
1379 RC
= Is64Bit
? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1381 RC
= Is64Bit
? &AArch64::GPR64spRegClass
: &AArch64::GPR32spRegClass
;
1384 ResultReg
= createResultReg(RC
);
1386 ResultReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
1388 const MCInstrDesc
&II
= TII
.get(Opc
);
1389 LHSReg
= constrainOperandRegClass(II
, LHSReg
, II
.getNumDefs());
1390 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, II
, ResultReg
)
1391 .addReg(LHSReg
, getKillRegState(LHSIsKill
))
1393 .addImm(getShifterImm(AArch64_AM::LSL
, ShiftImm
));
1397 unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
1398 bool LHSIsKill
, unsigned RHSReg
,
1400 AArch64_AM::ShiftExtendType ShiftType
,
1401 uint64_t ShiftImm
, bool SetFlags
,
1403 assert(LHSReg
&& RHSReg
&& "Invalid register number.");
1404 assert(LHSReg
!= AArch64::SP
&& LHSReg
!= AArch64::WSP
&&
1405 RHSReg
!= AArch64::SP
&& RHSReg
!= AArch64::WSP
);
1407 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
1410 // Don't deal with undefined shifts.
1411 if (ShiftImm
>= RetVT
.getSizeInBits())
1414 static const unsigned OpcTable
[2][2][2] = {
1415 { { AArch64::SUBWrs
, AArch64::SUBXrs
},
1416 { AArch64::ADDWrs
, AArch64::ADDXrs
} },
1417 { { AArch64::SUBSWrs
, AArch64::SUBSXrs
},
1418 { AArch64::ADDSWrs
, AArch64::ADDSXrs
} }
1420 bool Is64Bit
= RetVT
== MVT::i64
;
1421 unsigned Opc
= OpcTable
[SetFlags
][UseAdd
][Is64Bit
];
1422 const TargetRegisterClass
*RC
=
1423 Is64Bit
? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1426 ResultReg
= createResultReg(RC
);
1428 ResultReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
1430 const MCInstrDesc
&II
= TII
.get(Opc
);
1431 LHSReg
= constrainOperandRegClass(II
, LHSReg
, II
.getNumDefs());
1432 RHSReg
= constrainOperandRegClass(II
, RHSReg
, II
.getNumDefs() + 1);
1433 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, II
, ResultReg
)
1434 .addReg(LHSReg
, getKillRegState(LHSIsKill
))
1435 .addReg(RHSReg
, getKillRegState(RHSIsKill
))
1436 .addImm(getShifterImm(ShiftType
, ShiftImm
));
1440 unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
1441 bool LHSIsKill
, unsigned RHSReg
,
1443 AArch64_AM::ShiftExtendType ExtType
,
1444 uint64_t ShiftImm
, bool SetFlags
,
1446 assert(LHSReg
&& RHSReg
&& "Invalid register number.");
1447 assert(LHSReg
!= AArch64::XZR
&& LHSReg
!= AArch64::WZR
&&
1448 RHSReg
!= AArch64::XZR
&& RHSReg
!= AArch64::WZR
);
1450 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
1456 static const unsigned OpcTable
[2][2][2] = {
1457 { { AArch64::SUBWrx
, AArch64::SUBXrx
},
1458 { AArch64::ADDWrx
, AArch64::ADDXrx
} },
1459 { { AArch64::SUBSWrx
, AArch64::SUBSXrx
},
1460 { AArch64::ADDSWrx
, AArch64::ADDSXrx
} }
1462 bool Is64Bit
= RetVT
== MVT::i64
;
1463 unsigned Opc
= OpcTable
[SetFlags
][UseAdd
][Is64Bit
];
1464 const TargetRegisterClass
*RC
= nullptr;
1466 RC
= Is64Bit
? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1468 RC
= Is64Bit
? &AArch64::GPR64spRegClass
: &AArch64::GPR32spRegClass
;
1471 ResultReg
= createResultReg(RC
);
1473 ResultReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
1475 const MCInstrDesc
&II
= TII
.get(Opc
);
1476 LHSReg
= constrainOperandRegClass(II
, LHSReg
, II
.getNumDefs());
1477 RHSReg
= constrainOperandRegClass(II
, RHSReg
, II
.getNumDefs() + 1);
1478 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, II
, ResultReg
)
1479 .addReg(LHSReg
, getKillRegState(LHSIsKill
))
1480 .addReg(RHSReg
, getKillRegState(RHSIsKill
))
1481 .addImm(getArithExtendImm(ExtType
, ShiftImm
));
1485 bool AArch64FastISel::emitCmp(const Value
*LHS
, const Value
*RHS
, bool IsZExt
) {
1486 Type
*Ty
= LHS
->getType();
1487 EVT EVT
= TLI
.getValueType(DL
, Ty
, true);
1488 if (!EVT
.isSimple())
1490 MVT VT
= EVT
.getSimpleVT();
1492 switch (VT
.SimpleTy
) {
1500 return emitICmp(VT
, LHS
, RHS
, IsZExt
);
1503 return emitFCmp(VT
, LHS
, RHS
);
1507 bool AArch64FastISel::emitICmp(MVT RetVT
, const Value
*LHS
, const Value
*RHS
,
1509 return emitSub(RetVT
, LHS
, RHS
, /*SetFlags=*/true, /*WantResult=*/false,
1513 bool AArch64FastISel::emitICmp_ri(MVT RetVT
, unsigned LHSReg
, bool LHSIsKill
,
1515 return emitAddSub_ri(/*UseAdd=*/false, RetVT
, LHSReg
, LHSIsKill
, Imm
,
1516 /*SetFlags=*/true, /*WantResult=*/false) != 0;
1519 bool AArch64FastISel::emitFCmp(MVT RetVT
, const Value
*LHS
, const Value
*RHS
) {
1520 if (RetVT
!= MVT::f32
&& RetVT
!= MVT::f64
)
1523 // Check to see if the 2nd operand is a constant that we can encode directly
1525 bool UseImm
= false;
1526 if (const auto *CFP
= dyn_cast
<ConstantFP
>(RHS
))
1527 if (CFP
->isZero() && !CFP
->isNegative())
1530 unsigned LHSReg
= getRegForValue(LHS
);
1533 bool LHSIsKill
= hasTrivialKill(LHS
);
1536 unsigned Opc
= (RetVT
== MVT::f64
) ? AArch64::FCMPDri
: AArch64::FCMPSri
;
1537 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(Opc
))
1538 .addReg(LHSReg
, getKillRegState(LHSIsKill
));
1542 unsigned RHSReg
= getRegForValue(RHS
);
1545 bool RHSIsKill
= hasTrivialKill(RHS
);
1547 unsigned Opc
= (RetVT
== MVT::f64
) ? AArch64::FCMPDrr
: AArch64::FCMPSrr
;
1548 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(Opc
))
1549 .addReg(LHSReg
, getKillRegState(LHSIsKill
))
1550 .addReg(RHSReg
, getKillRegState(RHSIsKill
));
1554 unsigned AArch64FastISel::emitAdd(MVT RetVT
, const Value
*LHS
, const Value
*RHS
,
1555 bool SetFlags
, bool WantResult
, bool IsZExt
) {
1556 return emitAddSub(/*UseAdd=*/true, RetVT
, LHS
, RHS
, SetFlags
, WantResult
,
1560 /// This method is a wrapper to simplify add emission.
1562 /// First try to emit an add with an immediate operand using emitAddSub_ri. If
1563 /// that fails, then try to materialize the immediate into a register and use
1564 /// emitAddSub_rr instead.
1565 unsigned AArch64FastISel::emitAdd_ri_(MVT VT
, unsigned Op0
, bool Op0IsKill
,
1569 ResultReg
= emitAddSub_ri(false, VT
, Op0
, Op0IsKill
, -Imm
);
1571 ResultReg
= emitAddSub_ri(true, VT
, Op0
, Op0IsKill
, Imm
);
1576 unsigned CReg
= fastEmit_i(VT
, VT
, ISD::Constant
, Imm
);
1580 ResultReg
= emitAddSub_rr(true, VT
, Op0
, Op0IsKill
, CReg
, true);
1584 unsigned AArch64FastISel::emitSub(MVT RetVT
, const Value
*LHS
, const Value
*RHS
,
1585 bool SetFlags
, bool WantResult
, bool IsZExt
) {
1586 return emitAddSub(/*UseAdd=*/false, RetVT
, LHS
, RHS
, SetFlags
, WantResult
,
1590 unsigned AArch64FastISel::emitSubs_rr(MVT RetVT
, unsigned LHSReg
,
1591 bool LHSIsKill
, unsigned RHSReg
,
1592 bool RHSIsKill
, bool WantResult
) {
1593 return emitAddSub_rr(/*UseAdd=*/false, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1594 RHSIsKill
, /*SetFlags=*/true, WantResult
);
1597 unsigned AArch64FastISel::emitSubs_rs(MVT RetVT
, unsigned LHSReg
,
1598 bool LHSIsKill
, unsigned RHSReg
,
1600 AArch64_AM::ShiftExtendType ShiftType
,
1601 uint64_t ShiftImm
, bool WantResult
) {
1602 return emitAddSub_rs(/*UseAdd=*/false, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1603 RHSIsKill
, ShiftType
, ShiftImm
, /*SetFlags=*/true,
1607 unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc
, MVT RetVT
,
1608 const Value
*LHS
, const Value
*RHS
) {
1609 // Canonicalize immediates to the RHS first.
1610 if (isa
<ConstantInt
>(LHS
) && !isa
<ConstantInt
>(RHS
))
1611 std::swap(LHS
, RHS
);
1613 // Canonicalize mul by power-of-2 to the RHS.
1614 if (LHS
->hasOneUse() && isValueAvailable(LHS
))
1615 if (isMulPowOf2(LHS
))
1616 std::swap(LHS
, RHS
);
1618 // Canonicalize shift immediate to the RHS.
1619 if (LHS
->hasOneUse() && isValueAvailable(LHS
))
1620 if (const auto *SI
= dyn_cast
<ShlOperator
>(LHS
))
1621 if (isa
<ConstantInt
>(SI
->getOperand(1)))
1622 std::swap(LHS
, RHS
);
1624 unsigned LHSReg
= getRegForValue(LHS
);
1627 bool LHSIsKill
= hasTrivialKill(LHS
);
1629 unsigned ResultReg
= 0;
1630 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
)) {
1631 uint64_t Imm
= C
->getZExtValue();
1632 ResultReg
= emitLogicalOp_ri(ISDOpc
, RetVT
, LHSReg
, LHSIsKill
, Imm
);
1637 // Check if the mul can be folded into the instruction.
1638 if (RHS
->hasOneUse() && isValueAvailable(RHS
)) {
1639 if (isMulPowOf2(RHS
)) {
1640 const Value
*MulLHS
= cast
<MulOperator
>(RHS
)->getOperand(0);
1641 const Value
*MulRHS
= cast
<MulOperator
>(RHS
)->getOperand(1);
1643 if (const auto *C
= dyn_cast
<ConstantInt
>(MulLHS
))
1644 if (C
->getValue().isPowerOf2())
1645 std::swap(MulLHS
, MulRHS
);
1647 assert(isa
<ConstantInt
>(MulRHS
) && "Expected a ConstantInt.");
1648 uint64_t ShiftVal
= cast
<ConstantInt
>(MulRHS
)->getValue().logBase2();
1650 unsigned RHSReg
= getRegForValue(MulLHS
);
1653 bool RHSIsKill
= hasTrivialKill(MulLHS
);
1654 ResultReg
= emitLogicalOp_rs(ISDOpc
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1655 RHSIsKill
, ShiftVal
);
1661 // Check if the shift can be folded into the instruction.
1662 if (RHS
->hasOneUse() && isValueAvailable(RHS
)) {
1663 if (const auto *SI
= dyn_cast
<ShlOperator
>(RHS
))
1664 if (const auto *C
= dyn_cast
<ConstantInt
>(SI
->getOperand(1))) {
1665 uint64_t ShiftVal
= C
->getZExtValue();
1666 unsigned RHSReg
= getRegForValue(SI
->getOperand(0));
1669 bool RHSIsKill
= hasTrivialKill(SI
->getOperand(0));
1670 ResultReg
= emitLogicalOp_rs(ISDOpc
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1671 RHSIsKill
, ShiftVal
);
1677 unsigned RHSReg
= getRegForValue(RHS
);
1680 bool RHSIsKill
= hasTrivialKill(RHS
);
1682 MVT VT
= std::max(MVT::i32
, RetVT
.SimpleTy
);
1683 ResultReg
= fastEmit_rr(VT
, VT
, ISDOpc
, LHSReg
, LHSIsKill
, RHSReg
, RHSIsKill
);
1684 if (RetVT
>= MVT::i8
&& RetVT
<= MVT::i16
) {
1685 uint64_t Mask
= (RetVT
== MVT::i8
) ? 0xff : 0xffff;
1686 ResultReg
= emitAnd_ri(MVT::i32
, ResultReg
, /*IsKill=*/true, Mask
);
1691 unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc
, MVT RetVT
,
1692 unsigned LHSReg
, bool LHSIsKill
,
1694 static_assert((ISD::AND
+ 1 == ISD::OR
) && (ISD::AND
+ 2 == ISD::XOR
),
1695 "ISD nodes are not consecutive!");
1696 static const unsigned OpcTable
[3][2] = {
1697 { AArch64::ANDWri
, AArch64::ANDXri
},
1698 { AArch64::ORRWri
, AArch64::ORRXri
},
1699 { AArch64::EORWri
, AArch64::EORXri
}
1701 const TargetRegisterClass
*RC
;
1704 switch (RetVT
.SimpleTy
) {
1711 unsigned Idx
= ISDOpc
- ISD::AND
;
1712 Opc
= OpcTable
[Idx
][0];
1713 RC
= &AArch64::GPR32spRegClass
;
1718 Opc
= OpcTable
[ISDOpc
- ISD::AND
][1];
1719 RC
= &AArch64::GPR64spRegClass
;
1724 if (!AArch64_AM::isLogicalImmediate(Imm
, RegSize
))
1727 unsigned ResultReg
=
1728 fastEmitInst_ri(Opc
, RC
, LHSReg
, LHSIsKill
,
1729 AArch64_AM::encodeLogicalImmediate(Imm
, RegSize
));
1730 if (RetVT
>= MVT::i8
&& RetVT
<= MVT::i16
&& ISDOpc
!= ISD::AND
) {
1731 uint64_t Mask
= (RetVT
== MVT::i8
) ? 0xff : 0xffff;
1732 ResultReg
= emitAnd_ri(MVT::i32
, ResultReg
, /*IsKill=*/true, Mask
);
1737 unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc
, MVT RetVT
,
1738 unsigned LHSReg
, bool LHSIsKill
,
1739 unsigned RHSReg
, bool RHSIsKill
,
1740 uint64_t ShiftImm
) {
1741 static_assert((ISD::AND
+ 1 == ISD::OR
) && (ISD::AND
+ 2 == ISD::XOR
),
1742 "ISD nodes are not consecutive!");
1743 static const unsigned OpcTable
[3][2] = {
1744 { AArch64::ANDWrs
, AArch64::ANDXrs
},
1745 { AArch64::ORRWrs
, AArch64::ORRXrs
},
1746 { AArch64::EORWrs
, AArch64::EORXrs
}
1749 // Don't deal with undefined shifts.
1750 if (ShiftImm
>= RetVT
.getSizeInBits())
1753 const TargetRegisterClass
*RC
;
1755 switch (RetVT
.SimpleTy
) {
1762 Opc
= OpcTable
[ISDOpc
- ISD::AND
][0];
1763 RC
= &AArch64::GPR32RegClass
;
1766 Opc
= OpcTable
[ISDOpc
- ISD::AND
][1];
1767 RC
= &AArch64::GPR64RegClass
;
1770 unsigned ResultReg
=
1771 fastEmitInst_rri(Opc
, RC
, LHSReg
, LHSIsKill
, RHSReg
, RHSIsKill
,
1772 AArch64_AM::getShifterImm(AArch64_AM::LSL
, ShiftImm
));
1773 if (RetVT
>= MVT::i8
&& RetVT
<= MVT::i16
) {
1774 uint64_t Mask
= (RetVT
== MVT::i8
) ? 0xff : 0xffff;
1775 ResultReg
= emitAnd_ri(MVT::i32
, ResultReg
, /*IsKill=*/true, Mask
);
1780 unsigned AArch64FastISel::emitAnd_ri(MVT RetVT
, unsigned LHSReg
, bool LHSIsKill
,
1782 return emitLogicalOp_ri(ISD::AND
, RetVT
, LHSReg
, LHSIsKill
, Imm
);
1785 unsigned AArch64FastISel::emitLoad(MVT VT
, MVT RetVT
, Address Addr
,
1786 bool WantZExt
, MachineMemOperand
*MMO
) {
1787 if (!TLI
.allowsMisalignedMemoryAccesses(VT
))
1790 // Simplify this down to something we can handle.
1791 if (!simplifyAddress(Addr
, VT
))
1794 unsigned ScaleFactor
= getImplicitScaleFactor(VT
);
1796 llvm_unreachable("Unexpected value type.");
1798 // Negative offsets require unscaled, 9-bit, signed immediate offsets.
1799 // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
1800 bool UseScaled
= true;
1801 if ((Addr
.getOffset() < 0) || (Addr
.getOffset() & (ScaleFactor
- 1))) {
1806 static const unsigned GPOpcTable
[2][8][4] = {
1808 { { AArch64::LDURSBWi
, AArch64::LDURSHWi
, AArch64::LDURWi
,
1810 { AArch64::LDURSBXi
, AArch64::LDURSHXi
, AArch64::LDURSWi
,
1812 { AArch64::LDRSBWui
, AArch64::LDRSHWui
, AArch64::LDRWui
,
1814 { AArch64::LDRSBXui
, AArch64::LDRSHXui
, AArch64::LDRSWui
,
1816 { AArch64::LDRSBWroX
, AArch64::LDRSHWroX
, AArch64::LDRWroX
,
1818 { AArch64::LDRSBXroX
, AArch64::LDRSHXroX
, AArch64::LDRSWroX
,
1820 { AArch64::LDRSBWroW
, AArch64::LDRSHWroW
, AArch64::LDRWroW
,
1822 { AArch64::LDRSBXroW
, AArch64::LDRSHXroW
, AArch64::LDRSWroW
,
1826 { { AArch64::LDURBBi
, AArch64::LDURHHi
, AArch64::LDURWi
,
1828 { AArch64::LDURBBi
, AArch64::LDURHHi
, AArch64::LDURWi
,
1830 { AArch64::LDRBBui
, AArch64::LDRHHui
, AArch64::LDRWui
,
1832 { AArch64::LDRBBui
, AArch64::LDRHHui
, AArch64::LDRWui
,
1834 { AArch64::LDRBBroX
, AArch64::LDRHHroX
, AArch64::LDRWroX
,
1836 { AArch64::LDRBBroX
, AArch64::LDRHHroX
, AArch64::LDRWroX
,
1838 { AArch64::LDRBBroW
, AArch64::LDRHHroW
, AArch64::LDRWroW
,
1840 { AArch64::LDRBBroW
, AArch64::LDRHHroW
, AArch64::LDRWroW
,
1845 static const unsigned FPOpcTable
[4][2] = {
1846 { AArch64::LDURSi
, AArch64::LDURDi
},
1847 { AArch64::LDRSui
, AArch64::LDRDui
},
1848 { AArch64::LDRSroX
, AArch64::LDRDroX
},
1849 { AArch64::LDRSroW
, AArch64::LDRDroW
}
1853 const TargetRegisterClass
*RC
;
1854 bool UseRegOffset
= Addr
.isRegBase() && !Addr
.getOffset() && Addr
.getReg() &&
1855 Addr
.getOffsetReg();
1856 unsigned Idx
= UseRegOffset
? 2 : UseScaled
? 1 : 0;
1857 if (Addr
.getExtendType() == AArch64_AM::UXTW
||
1858 Addr
.getExtendType() == AArch64_AM::SXTW
)
1861 bool IsRet64Bit
= RetVT
== MVT::i64
;
1862 switch (VT
.SimpleTy
) {
1864 llvm_unreachable("Unexpected value type.");
1865 case MVT::i1
: // Intentional fall-through.
1867 Opc
= GPOpcTable
[WantZExt
][2 * Idx
+ IsRet64Bit
][0];
1868 RC
= (IsRet64Bit
&& !WantZExt
) ?
1869 &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1872 Opc
= GPOpcTable
[WantZExt
][2 * Idx
+ IsRet64Bit
][1];
1873 RC
= (IsRet64Bit
&& !WantZExt
) ?
1874 &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1877 Opc
= GPOpcTable
[WantZExt
][2 * Idx
+ IsRet64Bit
][2];
1878 RC
= (IsRet64Bit
&& !WantZExt
) ?
1879 &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1882 Opc
= GPOpcTable
[WantZExt
][2 * Idx
+ IsRet64Bit
][3];
1883 RC
= &AArch64::GPR64RegClass
;
1886 Opc
= FPOpcTable
[Idx
][0];
1887 RC
= &AArch64::FPR32RegClass
;
1890 Opc
= FPOpcTable
[Idx
][1];
1891 RC
= &AArch64::FPR64RegClass
;
1895 // Create the base instruction, then add the operands.
1896 unsigned ResultReg
= createResultReg(RC
);
1897 MachineInstrBuilder MIB
= BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
,
1898 TII
.get(Opc
), ResultReg
);
1899 addLoadStoreOperands(Addr
, MIB
, MachineMemOperand::MOLoad
, ScaleFactor
, MMO
);
1901 // Loading an i1 requires special handling.
1902 if (VT
== MVT::i1
) {
1903 unsigned ANDReg
= emitAnd_ri(MVT::i32
, ResultReg
, /*IsKill=*/true, 1);
1904 assert(ANDReg
&& "Unexpected AND instruction emission failure.");
1908 // For zero-extending loads to 64bit we emit a 32bit load and then convert
1909 // the 32bit reg to a 64bit reg.
1910 if (WantZExt
&& RetVT
== MVT::i64
&& VT
<= MVT::i32
) {
1911 unsigned Reg64
= createResultReg(&AArch64::GPR64RegClass
);
1912 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
,
1913 TII
.get(AArch64::SUBREG_TO_REG
), Reg64
)
1915 .addReg(ResultReg
, getKillRegState(true))
1916 .addImm(AArch64::sub_32
);
bool AArch64FastISel::selectAddSub(const Instruction *I) {
  MVT VT;
  if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
    return false;

  if (VT.isVector())
    return selectOperator(I, I->getOpcode());

  unsigned ResultReg;
  switch (I->getOpcode()) {
  default:
    llvm_unreachable("Unexpected instruction.");
  case Instruction::Add:
    ResultReg = emitAdd(VT, I->getOperand(0), I->getOperand(1));
    break;
  case Instruction::Sub:
    ResultReg = emitSub(VT, I->getOperand(0), I->getOperand(1));
    break;
  }
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}
bool AArch64FastISel::selectLogicalOp(const Instruction *I) {
  MVT VT;
  if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
    return false;

  if (VT.isVector())
    return selectOperator(I, I->getOpcode());

  unsigned ResultReg;
  switch (I->getOpcode()) {
  default:
    llvm_unreachable("Unexpected instruction.");
  case Instruction::And:
    ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
    break;
  case Instruction::Or:
    ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
    break;
  case Instruction::Xor:
    ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
    break;
  }
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}
1977 bool AArch64FastISel::selectLoad(const Instruction
*I
) {
1979 // Verify we have a legal type before going any further. Currently, we handle
1980 // simple types that will directly fit in a register (i32/f32/i64/f64) or
1981 // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
1982 if (!isTypeSupported(I
->getType(), VT
, /*IsVectorAllowed=*/true) ||
1983 cast
<LoadInst
>(I
)->isAtomic())
1986 const Value
*SV
= I
->getOperand(0);
1987 if (TLI
.supportSwiftError()) {
1988 // Swifterror values can come from either a function parameter with
1989 // swifterror attribute or an alloca with swifterror attribute.
1990 if (const Argument
*Arg
= dyn_cast
<Argument
>(SV
)) {
1991 if (Arg
->hasSwiftErrorAttr())
1995 if (const AllocaInst
*Alloca
= dyn_cast
<AllocaInst
>(SV
)) {
1996 if (Alloca
->isSwiftError())
2001 // See if we can handle this address.
2003 if (!computeAddress(I
->getOperand(0), Addr
, I
->getType()))
2006 // Fold the following sign-/zero-extend into the load instruction.
2007 bool WantZExt
= true;
2009 const Value
*IntExtVal
= nullptr;
2010 if (I
->hasOneUse()) {
2011 if (const auto *ZE
= dyn_cast
<ZExtInst
>(I
->use_begin()->getUser())) {
2012 if (isTypeSupported(ZE
->getType(), RetVT
))
2016 } else if (const auto *SE
= dyn_cast
<SExtInst
>(I
->use_begin()->getUser())) {
2017 if (isTypeSupported(SE
->getType(), RetVT
))
2025 unsigned ResultReg
=
2026 emitLoad(VT
, RetVT
, Addr
, WantZExt
, createMachineMemOperandFor(I
));
2030 // There are a few different cases we have to handle, because the load or the
2031 // sign-/zero-extend might not be selected by FastISel if we fall-back to
2032 // SelectionDAG. There is also an ordering issue when both instructions are in
2033 // different basic blocks.
2034 // 1.) The load instruction is selected by FastISel, but the integer extend
2035 // not. This usually happens when the integer extend is in a different
2036 // basic block and SelectionDAG took over for that basic block.
2037 // 2.) The load instruction is selected before the integer extend. This only
2038 // happens when the integer extend is in a different basic block.
2039 // 3.) The load instruction is selected by SelectionDAG and the integer extend
2040 // by FastISel. This happens if there are instructions between the load
2041 // and the integer extend that couldn't be selected by FastISel.
2043 // The integer extend hasn't been emitted yet. FastISel or SelectionDAG
2044 // could select it. Emit a copy to subreg if necessary. FastISel will remove
2045 // it when it selects the integer extend.
2046 unsigned Reg
= lookUpRegForValue(IntExtVal
);
2047 auto *MI
= MRI
.getUniqueVRegDef(Reg
);
2049 if (RetVT
== MVT::i64
&& VT
<= MVT::i32
) {
2051 // Delete the last emitted instruction from emitLoad (SUBREG_TO_REG).
2052 MachineBasicBlock::iterator
I(std::prev(FuncInfo
.InsertPt
));
2053 ResultReg
= std::prev(I
)->getOperand(0).getReg();
2054 removeDeadCode(I
, std::next(I
));
2056 ResultReg
= fastEmitInst_extractsubreg(MVT::i32
, ResultReg
,
2060 updateValueMap(I
, ResultReg
);
2064 // The integer extend has already been emitted - delete all the instructions
2065 // that have been emitted by the integer extend lowering code and use the
2066 // result from the load instruction directly.
2069 for (auto &Opnd
: MI
->uses()) {
2071 Reg
= Opnd
.getReg();
2075 MachineBasicBlock::iterator
I(MI
);
2076 removeDeadCode(I
, std::next(I
));
2079 MI
= MRI
.getUniqueVRegDef(Reg
);
2081 updateValueMap(IntExtVal
, ResultReg
);
2085 updateValueMap(I
, ResultReg
);
bool AArch64FastISel::emitStoreRelease(MVT VT, unsigned SrcReg,
                                       unsigned AddrReg,
                                       MachineMemOperand *MMO) {
  unsigned Opc;
  switch (VT.SimpleTy) {
  default: return false;
  case MVT::i8:  Opc = AArch64::STLRB; break;
  case MVT::i16: Opc = AArch64::STLRH; break;
  case MVT::i32: Opc = AArch64::STLRW; break;
  case MVT::i64: Opc = AArch64::STLRX; break;
  }

  const MCInstrDesc &II = TII.get(Opc);
  SrcReg = constrainOperandRegClass(II, SrcReg, 0);
  AddrReg = constrainOperandRegClass(II, AddrReg, 1);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(SrcReg)
      .addReg(AddrReg)
      .addMemOperand(MMO);
  return true;
}
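// Example (illustrative): a release (or seq_cst) atomic store such as
//   store atomic i32 %v, i32* %p release, align 4
// is emitted as STLRW %v, [%p]. STLR only supports a plain base register, so
// no immediate offset or register-offset folding is attempted here.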
bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
                                MachineMemOperand *MMO) {
  if (!TLI.allowsMisalignedMemoryAccesses(VT))
    return false;

  // Simplify this down to something we can handle.
  if (!simplifyAddress(Addr, VT))
    return false;

  unsigned ScaleFactor = getImplicitScaleFactor(VT);
  if (!ScaleFactor)
    llvm_unreachable("Unexpected value type.");

  // Negative offsets require unscaled, 9-bit, signed immediate offsets.
  // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
  bool UseScaled = true;
  if ((Addr.getOffset() < 0) || (Addr.getOffset() & (ScaleFactor - 1))) {
    UseScaled = false;
    ScaleFactor = 1;
  }

  static const unsigned OpcTable[4][6] = {
    { AArch64::STURBBi,  AArch64::STURHHi,  AArch64::STURWi,  AArch64::STURXi,
      AArch64::STURSi,   AArch64::STURDi },
    { AArch64::STRBBui,  AArch64::STRHHui,  AArch64::STRWui,  AArch64::STRXui,
      AArch64::STRSui,   AArch64::STRDui },
    { AArch64::STRBBroX, AArch64::STRHHroX, AArch64::STRWroX, AArch64::STRXroX,
      AArch64::STRSroX,  AArch64::STRDroX },
    { AArch64::STRBBroW, AArch64::STRHHroW, AArch64::STRWroW, AArch64::STRXroW,
      AArch64::STRSroW,  AArch64::STRDroW }
  };

  unsigned Opc;
  bool VTIsi1 = false;
  bool UseRegOffset = Addr.isRegBase() && !Addr.getOffset() && Addr.getReg() &&
                      Addr.getOffsetReg();
  unsigned Idx = UseRegOffset ? 2 : UseScaled ? 1 : 0;
  if (Addr.getExtendType() == AArch64_AM::UXTW ||
      Addr.getExtendType() == AArch64_AM::SXTW)
    Idx++;

  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected value type.");
  case MVT::i1:  VTIsi1 = true; LLVM_FALLTHROUGH;
  case MVT::i8:  Opc = OpcTable[Idx][0]; break;
  case MVT::i16: Opc = OpcTable[Idx][1]; break;
  case MVT::i32: Opc = OpcTable[Idx][2]; break;
  case MVT::i64: Opc = OpcTable[Idx][3]; break;
  case MVT::f32: Opc = OpcTable[Idx][4]; break;
  case MVT::f64: Opc = OpcTable[Idx][5]; break;
  }

  // Storing an i1 requires special handling.
  if (VTIsi1 && SrcReg != AArch64::WZR) {
    unsigned ANDReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1);
    assert(ANDReg && "Unexpected AND instruction emission failure.");
    SrcReg = ANDReg;
  }
  // Create the base instruction, then add the operands.
  const MCInstrDesc &II = TII.get(Opc);
  SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(SrcReg);
  addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, ScaleFactor, MMO);

  return true;
}
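// Example (illustrative): storing an i1 value first masks the source with
// AND #1 and then uses the byte-store form, e.g. for a scaled immediate
// address:  ANDWri Wtmp, Wsrc, #0x1 ; STRBBui Wtmp, [Xbase, #off]
// Register-offset addresses select the roX/roW table rows instead.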
2179 bool AArch64FastISel::selectStore(const Instruction
*I
) {
2181 const Value
*Op0
= I
->getOperand(0);
2182 // Verify we have a legal type before going any further. Currently, we handle
2183 // simple types that will directly fit in a register (i32/f32/i64/f64) or
2184 // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
2185 if (!isTypeSupported(Op0
->getType(), VT
, /*IsVectorAllowed=*/true))
2188 const Value
*PtrV
= I
->getOperand(1);
2189 if (TLI
.supportSwiftError()) {
2190 // Swifterror values can come from either a function parameter with
2191 // swifterror attribute or an alloca with swifterror attribute.
2192 if (const Argument
*Arg
= dyn_cast
<Argument
>(PtrV
)) {
2193 if (Arg
->hasSwiftErrorAttr())
2197 if (const AllocaInst
*Alloca
= dyn_cast
<AllocaInst
>(PtrV
)) {
2198 if (Alloca
->isSwiftError())
2203 // Get the value to be stored into a register. Use the zero register directly
2204 // when possible to avoid an unnecessary copy and a wasted register.
2205 unsigned SrcReg
= 0;
2206 if (const auto *CI
= dyn_cast
<ConstantInt
>(Op0
)) {
2208 SrcReg
= (VT
== MVT::i64
) ? AArch64::XZR
: AArch64::WZR
;
2209 } else if (const auto *CF
= dyn_cast
<ConstantFP
>(Op0
)) {
2210 if (CF
->isZero() && !CF
->isNegative()) {
2211 VT
= MVT::getIntegerVT(VT
.getSizeInBits());
2212 SrcReg
= (VT
== MVT::i64
) ? AArch64::XZR
: AArch64::WZR
;
2217 SrcReg
= getRegForValue(Op0
);
2222 auto *SI
= cast
<StoreInst
>(I
);
2224 // Try to emit a STLR for seq_cst/release.
2225 if (SI
->isAtomic()) {
2226 AtomicOrdering Ord
= SI
->getOrdering();
2227 // The non-atomic instructions are sufficient for relaxed stores.
2228 if (isReleaseOrStronger(Ord
)) {
2229 // The STLR addressing mode only supports a base reg; pass that directly.
2230 unsigned AddrReg
= getRegForValue(PtrV
);
2231 return emitStoreRelease(VT
, SrcReg
, AddrReg
,
2232 createMachineMemOperandFor(I
));
2236 // See if we can handle this address.
2238 if (!computeAddress(PtrV
, Addr
, Op0
->getType()))
2241 if (!emitStore(VT
, SrcReg
, Addr
, createMachineMemOperandFor(I
)))
static AArch64CC::CondCode getCompareCC(CmpInst::Predicate Pred) {
  switch (Pred) {
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
  default:
    // AL is our "false" for now. The other two need more compares.
    return AArch64CC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return AArch64CC::HI;
  case CmpInst::FCMP_OLT:
    return AArch64CC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return AArch64CC::LS;
  case CmpInst::FCMP_ORD:
    return AArch64CC::VC;
  case CmpInst::FCMP_UNO:
    return AArch64CC::VS;
  case CmpInst::FCMP_UGE:
    return AArch64CC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return AArch64CC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  }
}
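// Example mappings (illustrative): icmp ult -> LO (unsigned lower), icmp sge
// -> GE, fcmp oeq -> EQ. FCMP_ONE and FCMP_UEQ map to AL here because each of
// them needs two condition checks; callers emit the extra compare/branch or
// CSINC themselves.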
2292 /// Try to emit a combined compare-and-branch instruction.
2293 bool AArch64FastISel::emitCompareAndBranch(const BranchInst
*BI
) {
2294 // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
2295 // will not be produced, as they are conditional branch instructions that do
2297 if (FuncInfo
.MF
->getFunction().hasFnAttribute(
2298 Attribute::SpeculativeLoadHardening
))
2301 assert(isa
<CmpInst
>(BI
->getCondition()) && "Expected cmp instruction");
2302 const CmpInst
*CI
= cast
<CmpInst
>(BI
->getCondition());
2303 CmpInst::Predicate Predicate
= optimizeCmpPredicate(CI
);
2305 const Value
*LHS
= CI
->getOperand(0);
2306 const Value
*RHS
= CI
->getOperand(1);
2309 if (!isTypeSupported(LHS
->getType(), VT
))
2312 unsigned BW
= VT
.getSizeInBits();
2316 MachineBasicBlock
*TBB
= FuncInfo
.MBBMap
[BI
->getSuccessor(0)];
2317 MachineBasicBlock
*FBB
= FuncInfo
.MBBMap
[BI
->getSuccessor(1)];
2319 // Try to take advantage of fallthrough opportunities.
2320 if (FuncInfo
.MBB
->isLayoutSuccessor(TBB
)) {
2321 std::swap(TBB
, FBB
);
2322 Predicate
= CmpInst::getInversePredicate(Predicate
);
2327 switch (Predicate
) {
2330 case CmpInst::ICMP_EQ
:
2331 case CmpInst::ICMP_NE
:
2332 if (isa
<Constant
>(LHS
) && cast
<Constant
>(LHS
)->isNullValue())
2333 std::swap(LHS
, RHS
);
2335 if (!isa
<Constant
>(RHS
) || !cast
<Constant
>(RHS
)->isNullValue())
2338 if (const auto *AI
= dyn_cast
<BinaryOperator
>(LHS
))
2339 if (AI
->getOpcode() == Instruction::And
&& isValueAvailable(AI
)) {
2340 const Value
*AndLHS
= AI
->getOperand(0);
2341 const Value
*AndRHS
= AI
->getOperand(1);
2343 if (const auto *C
= dyn_cast
<ConstantInt
>(AndLHS
))
2344 if (C
->getValue().isPowerOf2())
2345 std::swap(AndLHS
, AndRHS
);
2347 if (const auto *C
= dyn_cast
<ConstantInt
>(AndRHS
))
2348 if (C
->getValue().isPowerOf2()) {
2349 TestBit
= C
->getValue().logBase2();
2357 IsCmpNE
= Predicate
== CmpInst::ICMP_NE
;
2359 case CmpInst::ICMP_SLT
:
2360 case CmpInst::ICMP_SGE
:
2361 if (!isa
<Constant
>(RHS
) || !cast
<Constant
>(RHS
)->isNullValue())
2365 IsCmpNE
= Predicate
== CmpInst::ICMP_SLT
;
2367 case CmpInst::ICMP_SGT
:
2368 case CmpInst::ICMP_SLE
:
2369 if (!isa
<ConstantInt
>(RHS
))
2372 if (cast
<ConstantInt
>(RHS
)->getValue() != APInt(BW
, -1, true))
2376 IsCmpNE
= Predicate
== CmpInst::ICMP_SLE
;
2380 static const unsigned OpcTable
[2][2][2] = {
2381 { {AArch64::CBZW
, AArch64::CBZX
},
2382 {AArch64::CBNZW
, AArch64::CBNZX
} },
2383 { {AArch64::TBZW
, AArch64::TBZX
},
2384 {AArch64::TBNZW
, AArch64::TBNZX
} }
2387 bool IsBitTest
= TestBit
!= -1;
2388 bool Is64Bit
= BW
== 64;
2389 if (TestBit
< 32 && TestBit
>= 0)
2392 unsigned Opc
= OpcTable
[IsBitTest
][IsCmpNE
][Is64Bit
];
2393 const MCInstrDesc
&II
= TII
.get(Opc
);
2395 unsigned SrcReg
= getRegForValue(LHS
);
2398 bool SrcIsKill
= hasTrivialKill(LHS
);
2400 if (BW
== 64 && !Is64Bit
)
2401 SrcReg
= fastEmitInst_extractsubreg(MVT::i32
, SrcReg
, SrcIsKill
,
2404 if ((BW
< 32) && !IsBitTest
)
2405 SrcReg
= emitIntExt(VT
, SrcReg
, MVT::i32
, /*isZExt=*/true);
2407 // Emit the combined compare and branch instruction.
2408 SrcReg
= constrainOperandRegClass(II
, SrcReg
, II
.getNumDefs());
2409 MachineInstrBuilder MIB
=
2410 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(Opc
))
2411 .addReg(SrcReg
, getKillRegState(SrcIsKill
));
2413 MIB
.addImm(TestBit
);
2416 finishCondBranch(BI
->getParent(), TBB
, FBB
);
2420 bool AArch64FastISel::selectBranch(const Instruction
*I
) {
2421 const BranchInst
*BI
= cast
<BranchInst
>(I
);
2422 if (BI
->isUnconditional()) {
2423 MachineBasicBlock
*MSucc
= FuncInfo
.MBBMap
[BI
->getSuccessor(0)];
2424 fastEmitBranch(MSucc
, BI
->getDebugLoc());
2428 MachineBasicBlock
*TBB
= FuncInfo
.MBBMap
[BI
->getSuccessor(0)];
2429 MachineBasicBlock
*FBB
= FuncInfo
.MBBMap
[BI
->getSuccessor(1)];
2431 if (const CmpInst
*CI
= dyn_cast
<CmpInst
>(BI
->getCondition())) {
2432 if (CI
->hasOneUse() && isValueAvailable(CI
)) {
2433 // Try to optimize or fold the cmp.
2434 CmpInst::Predicate Predicate
= optimizeCmpPredicate(CI
);
2435 switch (Predicate
) {
2438 case CmpInst::FCMP_FALSE
:
2439 fastEmitBranch(FBB
, DbgLoc
);
2441 case CmpInst::FCMP_TRUE
:
2442 fastEmitBranch(TBB
, DbgLoc
);
2446 // Try to emit a combined compare-and-branch first.
2447 if (emitCompareAndBranch(BI
))
2450 // Try to take advantage of fallthrough opportunities.
2451 if (FuncInfo
.MBB
->isLayoutSuccessor(TBB
)) {
2452 std::swap(TBB
, FBB
);
2453 Predicate
= CmpInst::getInversePredicate(Predicate
);
2457 if (!emitCmp(CI
->getOperand(0), CI
->getOperand(1), CI
->isUnsigned()))
2460 // FCMP_UEQ and FCMP_ONE cannot be checked with a single branch
2462 AArch64CC::CondCode CC
= getCompareCC(Predicate
);
2463 AArch64CC::CondCode ExtraCC
= AArch64CC::AL
;
2464 switch (Predicate
) {
2467 case CmpInst::FCMP_UEQ
:
2468 ExtraCC
= AArch64CC::EQ
;
2471 case CmpInst::FCMP_ONE
:
2472 ExtraCC
= AArch64CC::MI
;
2476 assert((CC
!= AArch64CC::AL
) && "Unexpected condition code.");
2478 // Emit the extra branch for FCMP_UEQ and FCMP_ONE.
2479 if (ExtraCC
!= AArch64CC::AL
) {
2480 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AArch64::Bcc
))
2486 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AArch64::Bcc
))
2490 finishCondBranch(BI
->getParent(), TBB
, FBB
);
2493 } else if (const auto *CI
= dyn_cast
<ConstantInt
>(BI
->getCondition())) {
2494 uint64_t Imm
= CI
->getZExtValue();
2495 MachineBasicBlock
*Target
= (Imm
== 0) ? FBB
: TBB
;
2496 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AArch64::B
))
2499 // Obtain the branch probability and add the target to the successor list.
2501 auto BranchProbability
= FuncInfo
.BPI
->getEdgeProbability(
2502 BI
->getParent(), Target
->getBasicBlock());
2503 FuncInfo
.MBB
->addSuccessor(Target
, BranchProbability
);
2505 FuncInfo
.MBB
->addSuccessorWithoutProb(Target
);
2508 AArch64CC::CondCode CC
= AArch64CC::NE
;
2509 if (foldXALUIntrinsic(CC
, I
, BI
->getCondition())) {
2510 // Fake request the condition, otherwise the intrinsic might be completely
2512 unsigned CondReg
= getRegForValue(BI
->getCondition());
2517 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AArch64::Bcc
))
2521 finishCondBranch(BI
->getParent(), TBB
, FBB
);
2526 unsigned CondReg
= getRegForValue(BI
->getCondition());
2529 bool CondRegIsKill
= hasTrivialKill(BI
->getCondition());
2531 // i1 conditions come as i32 values, test the lowest bit with tb(n)z.
2532 unsigned Opcode
= AArch64::TBNZW
;
2533 if (FuncInfo
.MBB
->isLayoutSuccessor(TBB
)) {
2534 std::swap(TBB
, FBB
);
2535 Opcode
= AArch64::TBZW
;
2538 const MCInstrDesc
&II
= TII
.get(Opcode
);
2539 unsigned ConstrainedCondReg
2540 = constrainOperandRegClass(II
, CondReg
, II
.getNumDefs());
2541 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, II
)
2542 .addReg(ConstrainedCondReg
, getKillRegState(CondRegIsKill
))
2546 finishCondBranch(BI
->getParent(), TBB
, FBB
);
bool AArch64FastISel::selectIndirectBr(const Instruction *I) {
  const IndirectBrInst *BI = cast<IndirectBrInst>(I);
  unsigned AddrReg = getRegForValue(BI->getOperand(0));
  if (AddrReg == 0)
    return false;

  // Emit the indirect branch.
  const MCInstrDesc &II = TII.get(AArch64::BR);
  AddrReg = constrainOperandRegClass(II, AddrReg, II.getNumDefs());
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(AddrReg);

  // Make sure the CFG is up-to-date.
  for (auto *Succ : BI->successors())
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[Succ]);

  return true;
}
2568 bool AArch64FastISel::selectCmp(const Instruction
*I
) {
2569 const CmpInst
*CI
= cast
<CmpInst
>(I
);
2571 // Vectors of i1 are weird: bail out.
2572 if (CI
->getType()->isVectorTy())
2575 // Try to optimize or fold the cmp.
2576 CmpInst::Predicate Predicate
= optimizeCmpPredicate(CI
);
2577 unsigned ResultReg
= 0;
2578 switch (Predicate
) {
2581 case CmpInst::FCMP_FALSE
:
2582 ResultReg
= createResultReg(&AArch64::GPR32RegClass
);
2583 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
,
2584 TII
.get(TargetOpcode::COPY
), ResultReg
)
2585 .addReg(AArch64::WZR
, getKillRegState(true));
2587 case CmpInst::FCMP_TRUE
:
2588 ResultReg
= fastEmit_i(MVT::i32
, MVT::i32
, ISD::Constant
, 1);
2593 updateValueMap(I
, ResultReg
);
2598 if (!emitCmp(CI
->getOperand(0), CI
->getOperand(1), CI
->isUnsigned()))
2601 ResultReg
= createResultReg(&AArch64::GPR32RegClass
);
2603 // FCMP_UEQ and FCMP_ONE cannot be checked with a single instruction. These
2604 // condition codes are inverted, because they are used by CSINC.
2605 static unsigned CondCodeTable
[2][2] = {
2606 { AArch64CC::NE
, AArch64CC::VC
},
2607 { AArch64CC::PL
, AArch64CC::LE
}
2609 unsigned *CondCodes
= nullptr;
2610 switch (Predicate
) {
2613 case CmpInst::FCMP_UEQ
:
2614 CondCodes
= &CondCodeTable
[0][0];
2616 case CmpInst::FCMP_ONE
:
2617 CondCodes
= &CondCodeTable
[1][0];
2622 unsigned TmpReg1
= createResultReg(&AArch64::GPR32RegClass
);
2623 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AArch64::CSINCWr
),
2625 .addReg(AArch64::WZR
, getKillRegState(true))
2626 .addReg(AArch64::WZR
, getKillRegState(true))
2627 .addImm(CondCodes
[0]);
2628 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AArch64::CSINCWr
),
2630 .addReg(TmpReg1
, getKillRegState(true))
2631 .addReg(AArch64::WZR
, getKillRegState(true))
2632 .addImm(CondCodes
[1]);
2634 updateValueMap(I
, ResultReg
);
2638 // Now set a register based on the comparison.
2639 AArch64CC::CondCode CC
= getCompareCC(Predicate
);
2640 assert((CC
!= AArch64CC::AL
) && "Unexpected condition code.");
2641 AArch64CC::CondCode invertedCC
= getInvertedCondCode(CC
);
2642 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AArch64::CSINCWr
),
2644 .addReg(AArch64::WZR
, getKillRegState(true))
2645 .addReg(AArch64::WZR
, getKillRegState(true))
2646 .addImm(invertedCC
);
2648 updateValueMap(I
, ResultReg
);
/// Optimize selects of i1 if one of the operands has a 'true' or 'false'
/// constant.
bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {
  if (!SI->getType()->isIntegerTy(1))
    return false;

  const Value *Src1Val, *Src2Val;
  unsigned Opc = 0;
  bool NeedExtraOp = false;
  if (auto *CI = dyn_cast<ConstantInt>(SI->getTrueValue())) {
    if (CI->isOne()) {
      Src1Val = SI->getCondition();
      Src2Val = SI->getFalseValue();
      Opc = AArch64::ORRWrr;
    } else {
      assert(CI->isZero());
      Src1Val = SI->getFalseValue();
      Src2Val = SI->getCondition();
      Opc = AArch64::BICWrr;
    }
  } else if (auto *CI = dyn_cast<ConstantInt>(SI->getFalseValue())) {
    if (CI->isOne()) {
      Src1Val = SI->getCondition();
      Src2Val = SI->getTrueValue();
      Opc = AArch64::ORRWrr;
      NeedExtraOp = true;
    } else {
      assert(CI->isZero());
      Src1Val = SI->getCondition();
      Src2Val = SI->getTrueValue();
      Opc = AArch64::ANDWrr;
    }
  }

  if (!Opc)
    return false;

  unsigned Src1Reg = getRegForValue(Src1Val);
  if (!Src1Reg)
    return false;
  bool Src1IsKill = hasTrivialKill(Src1Val);

  unsigned Src2Reg = getRegForValue(Src2Val);
  if (!Src2Reg)
    return false;
  bool Src2IsKill = hasTrivialKill(Src2Val);

  if (NeedExtraOp) {
    Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, Src1IsKill, 1);
    Src1IsKill = true;
  }
  unsigned ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, Src1Reg,
                                       Src1IsKill, Src2Reg, Src2IsKill);
  updateValueMap(SI, ResultReg);
  return true;
}
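// Illustrative folds performed above (i1 operands only):
//   select i1 %c, i1 true,  i1 %b  ->  ORR Wres, Wc, Wb
//   select i1 %c, i1 false, i1 %b  ->  BIC Wres, Wb, Wc
//   select i1 %c, i1 %a, i1 true   ->  EOR Wtmp, Wc, #1 ; ORR Wres, Wtmp, Wa
//   select i1 %c, i1 %a, i1 false  ->  AND Wres, Wc, Wa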
2709 bool AArch64FastISel::selectSelect(const Instruction
*I
) {
2710 assert(isa
<SelectInst
>(I
) && "Expected a select instruction.");
2712 if (!isTypeSupported(I
->getType(), VT
))
2716 const TargetRegisterClass
*RC
;
2717 switch (VT
.SimpleTy
) {
2724 Opc
= AArch64::CSELWr
;
2725 RC
= &AArch64::GPR32RegClass
;
2728 Opc
= AArch64::CSELXr
;
2729 RC
= &AArch64::GPR64RegClass
;
2732 Opc
= AArch64::FCSELSrrr
;
2733 RC
= &AArch64::FPR32RegClass
;
2736 Opc
= AArch64::FCSELDrrr
;
2737 RC
= &AArch64::FPR64RegClass
;
2741 const SelectInst
*SI
= cast
<SelectInst
>(I
);
2742 const Value
*Cond
= SI
->getCondition();
2743 AArch64CC::CondCode CC
= AArch64CC::NE
;
2744 AArch64CC::CondCode ExtraCC
= AArch64CC::AL
;
2746 if (optimizeSelect(SI
))
2749 // Try to pickup the flags, so we don't have to emit another compare.
2750 if (foldXALUIntrinsic(CC
, I
, Cond
)) {
2751 // Fake request the condition to force emission of the XALU intrinsic.
2752 unsigned CondReg
= getRegForValue(Cond
);
2755 } else if (isa
<CmpInst
>(Cond
) && cast
<CmpInst
>(Cond
)->hasOneUse() &&
2756 isValueAvailable(Cond
)) {
2757 const auto *Cmp
= cast
<CmpInst
>(Cond
);
2758 // Try to optimize or fold the cmp.
2759 CmpInst::Predicate Predicate
= optimizeCmpPredicate(Cmp
);
2760 const Value
*FoldSelect
= nullptr;
2761 switch (Predicate
) {
2764 case CmpInst::FCMP_FALSE
:
2765 FoldSelect
= SI
->getFalseValue();
2767 case CmpInst::FCMP_TRUE
:
2768 FoldSelect
= SI
->getTrueValue();
2773 unsigned SrcReg
= getRegForValue(FoldSelect
);
2776 unsigned UseReg
= lookUpRegForValue(SI
);
2778 MRI
.clearKillFlags(UseReg
);
2780 updateValueMap(I
, SrcReg
);
2785 if (!emitCmp(Cmp
->getOperand(0), Cmp
->getOperand(1), Cmp
->isUnsigned()))
2788 // FCMP_UEQ and FCMP_ONE cannot be checked with a single select instruction.
2789 CC
= getCompareCC(Predicate
);
2790 switch (Predicate
) {
2793 case CmpInst::FCMP_UEQ
:
2794 ExtraCC
= AArch64CC::EQ
;
2797 case CmpInst::FCMP_ONE
:
2798 ExtraCC
= AArch64CC::MI
;
2802 assert((CC
!= AArch64CC::AL
) && "Unexpected condition code.");
2804 unsigned CondReg
= getRegForValue(Cond
);
2807 bool CondIsKill
= hasTrivialKill(Cond
);
2809 const MCInstrDesc
&II
= TII
.get(AArch64::ANDSWri
);
2810 CondReg
= constrainOperandRegClass(II
, CondReg
, 1);
2812 // Emit a TST instruction (ANDS wzr, reg, #imm).
2813 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, II
,
2815 .addReg(CondReg
, getKillRegState(CondIsKill
))
2816 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
2819 unsigned Src1Reg
= getRegForValue(SI
->getTrueValue());
2820 bool Src1IsKill
= hasTrivialKill(SI
->getTrueValue());
2822 unsigned Src2Reg
= getRegForValue(SI
->getFalseValue());
2823 bool Src2IsKill
= hasTrivialKill(SI
->getFalseValue());
2825 if (!Src1Reg
|| !Src2Reg
)
2828 if (ExtraCC
!= AArch64CC::AL
) {
2829 Src2Reg
= fastEmitInst_rri(Opc
, RC
, Src1Reg
, Src1IsKill
, Src2Reg
,
2830 Src2IsKill
, ExtraCC
);
2833 unsigned ResultReg
= fastEmitInst_rri(Opc
, RC
, Src1Reg
, Src1IsKill
, Src2Reg
,
2835 updateValueMap(I
, ResultReg
);
bool AArch64FastISel::selectFPExt(const Instruction *I) {
  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy())
    return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0)
    return false;

  unsigned ResultReg = createResultReg(&AArch64::FPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
          ResultReg).addReg(Op);
  updateValueMap(I, ResultReg);
  return true;
}
bool AArch64FastISel::selectFPTrunc(const Instruction *I) {
  Value *V = I->getOperand(0);
  if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy())
    return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0)
    return false;

  unsigned ResultReg = createResultReg(&AArch64::FPR32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
          ResultReg).addReg(Op);
  updateValueMap(I, ResultReg);
  return true;
}
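// Illustrative: `fpext float %x to double` above maps to FCVTDSr (single to
// double) and `fptrunc double %x to float` here maps to FCVTSDr (double to
// single); both are single-instruction FPR-to-FPR conversions.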
// FPToUI and FPToSI
bool AArch64FastISel::selectFPToInt(const Instruction *I, bool Signed) {
  MVT DestVT;
  if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
    return false;

  unsigned SrcReg = getRegForValue(I->getOperand(0));
  if (SrcReg == 0)
    return false;

  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);
  if (SrcVT == MVT::f128 || SrcVT == MVT::f16)
    return false;

  unsigned Opc;
  if (SrcVT == MVT::f64) {
    if (Signed)
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWDr : AArch64::FCVTZSUXDr;
    else
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWDr : AArch64::FCVTZUUXDr;
  } else {
    if (Signed)
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWSr : AArch64::FCVTZSUXSr;
    else
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr;
  }
  unsigned ResultReg = createResultReg(
      DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(SrcReg);
  updateValueMap(I, ResultReg);
  return true;
}
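// Illustrative: `fptosi double %x to i32` becomes FCVTZSUWDr (convert to
// signed, rounding toward zero, W destination, D source); the unsigned and
// 64-bit variants swap the S/U and W/X letters in the opcode name.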
bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
  MVT DestVT;
  if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
    return false;
  // Let regular ISEL handle FP16
  if (DestVT == MVT::f16)
    return false;

  assert((DestVT == MVT::f32 || DestVT == MVT::f64) &&
         "Unexpected value type.");

  unsigned SrcReg = getRegForValue(I->getOperand(0));
  if (!SrcReg)
    return false;
  bool SrcIsKill = hasTrivialKill(I->getOperand(0));

  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) {
    SrcReg =
        emitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed);
    if (!SrcReg)
      return false;
    SrcIsKill = true;
  }

  unsigned Opc;
  if (SrcVT == MVT::i64) {
    if (Signed)
      Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUXSri : AArch64::SCVTFUXDri;
    else
      Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUXSri : AArch64::UCVTFUXDri;
  } else {
    if (Signed)
      Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUWSri : AArch64::SCVTFUWDri;
    else
      Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
  }

  unsigned ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
                                      SrcIsKill);
  updateValueMap(I, ResultReg);
  return true;
}
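// Illustrative: `sitofp i16 %x to float` is emitted as a sign-extend of %x to
// 32 bits followed by SCVTFUWSri; i64 sources pick the X-register variants,
// e.g. `uitofp i64 %x to double` -> UCVTFUXDri.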
2951 bool AArch64FastISel::fastLowerArguments() {
2952 if (!FuncInfo
.CanLowerReturn
)
2955 const Function
*F
= FuncInfo
.Fn
;
2959 CallingConv::ID CC
= F
->getCallingConv();
2960 if (CC
!= CallingConv::C
&& CC
!= CallingConv::Swift
)
2963 if (Subtarget
->hasCustomCallingConv())
2966 // Only handle simple cases of up to 8 GPR and FPR each.
2967 unsigned GPRCnt
= 0;
2968 unsigned FPRCnt
= 0;
2969 for (auto const &Arg
: F
->args()) {
2970 if (Arg
.hasAttribute(Attribute::ByVal
) ||
2971 Arg
.hasAttribute(Attribute::InReg
) ||
2972 Arg
.hasAttribute(Attribute::StructRet
) ||
2973 Arg
.hasAttribute(Attribute::SwiftSelf
) ||
2974 Arg
.hasAttribute(Attribute::SwiftError
) ||
2975 Arg
.hasAttribute(Attribute::Nest
))
2978 Type
*ArgTy
= Arg
.getType();
2979 if (ArgTy
->isStructTy() || ArgTy
->isArrayTy())
2982 EVT ArgVT
= TLI
.getValueType(DL
, ArgTy
);
2983 if (!ArgVT
.isSimple())
2986 MVT VT
= ArgVT
.getSimpleVT().SimpleTy
;
2987 if (VT
.isFloatingPoint() && !Subtarget
->hasFPARMv8())
2990 if (VT
.isVector() &&
2991 (!Subtarget
->hasNEON() || !Subtarget
->isLittleEndian()))
2994 if (VT
>= MVT::i1
&& VT
<= MVT::i64
)
2996 else if ((VT
>= MVT::f16
&& VT
<= MVT::f64
) || VT
.is64BitVector() ||
2997 VT
.is128BitVector())
3002 if (GPRCnt
> 8 || FPRCnt
> 8)
3006 static const MCPhysReg Registers
[6][8] = {
3007 { AArch64::W0
, AArch64::W1
, AArch64::W2
, AArch64::W3
, AArch64::W4
,
3008 AArch64::W5
, AArch64::W6
, AArch64::W7
},
3009 { AArch64::X0
, AArch64::X1
, AArch64::X2
, AArch64::X3
, AArch64::X4
,
3010 AArch64::X5
, AArch64::X6
, AArch64::X7
},
3011 { AArch64::H0
, AArch64::H1
, AArch64::H2
, AArch64::H3
, AArch64::H4
,
3012 AArch64::H5
, AArch64::H6
, AArch64::H7
},
3013 { AArch64::S0
, AArch64::S1
, AArch64::S2
, AArch64::S3
, AArch64::S4
,
3014 AArch64::S5
, AArch64::S6
, AArch64::S7
},
3015 { AArch64::D0
, AArch64::D1
, AArch64::D2
, AArch64::D3
, AArch64::D4
,
3016 AArch64::D5
, AArch64::D6
, AArch64::D7
},
3017 { AArch64::Q0
, AArch64::Q1
, AArch64::Q2
, AArch64::Q3
, AArch64::Q4
,
3018 AArch64::Q5
, AArch64::Q6
, AArch64::Q7
}
3021 unsigned GPRIdx
= 0;
3022 unsigned FPRIdx
= 0;
3023 for (auto const &Arg
: F
->args()) {
3024 MVT VT
= TLI
.getSimpleValueType(DL
, Arg
.getType());
3026 const TargetRegisterClass
*RC
;
3027 if (VT
>= MVT::i1
&& VT
<= MVT::i32
) {
3028 SrcReg
= Registers
[0][GPRIdx
++];
3029 RC
= &AArch64::GPR32RegClass
;
3031 } else if (VT
== MVT::i64
) {
3032 SrcReg
= Registers
[1][GPRIdx
++];
3033 RC
= &AArch64::GPR64RegClass
;
3034 } else if (VT
== MVT::f16
) {
3035 SrcReg
= Registers
[2][FPRIdx
++];
3036 RC
= &AArch64::FPR16RegClass
;
3037 } else if (VT
== MVT::f32
) {
3038 SrcReg
= Registers
[3][FPRIdx
++];
3039 RC
= &AArch64::FPR32RegClass
;
3040 } else if ((VT
== MVT::f64
) || VT
.is64BitVector()) {
3041 SrcReg
= Registers
[4][FPRIdx
++];
3042 RC
= &AArch64::FPR64RegClass
;
3043 } else if (VT
.is128BitVector()) {
3044 SrcReg
= Registers
[5][FPRIdx
++];
3045 RC
= &AArch64::FPR128RegClass
;
3047 llvm_unreachable("Unexpected value type.");
3049 unsigned DstReg
= FuncInfo
.MF
->addLiveIn(SrcReg
, RC
);
3050 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
3051 // Without this, EmitLiveInCopies may eliminate the livein if its only
3052 // use is a bitcast (which isn't turned into an instruction).
3053 unsigned ResultReg
= createResultReg(RC
);
3054 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
,
3055 TII
.get(TargetOpcode::COPY
), ResultReg
)
3056 .addReg(DstReg
, getKillRegState(true));
3057 updateValueMap(&Arg
, ResultReg
);
3062 bool AArch64FastISel::processCallArgs(CallLoweringInfo
&CLI
,
3063 SmallVectorImpl
<MVT
> &OutVTs
,
3064 unsigned &NumBytes
) {
3065 CallingConv::ID CC
= CLI
.CallConv
;
3066 SmallVector
<CCValAssign
, 16> ArgLocs
;
3067 CCState
CCInfo(CC
, false, *FuncInfo
.MF
, ArgLocs
, *Context
);
3068 CCInfo
.AnalyzeCallOperands(OutVTs
, CLI
.OutFlags
, CCAssignFnForCall(CC
));
3070 // Get a count of how many bytes are to be pushed on the stack.
3071 NumBytes
= CCInfo
.getNextStackOffset();
3073 // Issue CALLSEQ_START
3074 unsigned AdjStackDown
= TII
.getCallFrameSetupOpcode();
3075 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AdjStackDown
))
3076 .addImm(NumBytes
).addImm(0);
3078 // Process the args.
3079 for (CCValAssign
&VA
: ArgLocs
) {
3080 const Value
*ArgVal
= CLI
.OutVals
[VA
.getValNo()];
3081 MVT ArgVT
= OutVTs
[VA
.getValNo()];
3083 unsigned ArgReg
= getRegForValue(ArgVal
);
3087 // Handle arg promotion: SExt, ZExt, AExt.
3088 switch (VA
.getLocInfo()) {
3089 case CCValAssign::Full
:
3091 case CCValAssign::SExt
: {
3092 MVT DestVT
= VA
.getLocVT();
3094 ArgReg
= emitIntExt(SrcVT
, ArgReg
, DestVT
, /*isZExt=*/false);
3099 case CCValAssign::AExt
:
3100 // Intentional fall-through.
3101 case CCValAssign::ZExt
: {
3102 MVT DestVT
= VA
.getLocVT();
3104 ArgReg
= emitIntExt(SrcVT
, ArgReg
, DestVT
, /*isZExt=*/true);
3110 llvm_unreachable("Unknown arg promotion!");
3113 // Now copy/store arg to correct locations.
3114 if (VA
.isRegLoc() && !VA
.needsCustom()) {
3115 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
,
3116 TII
.get(TargetOpcode::COPY
), VA
.getLocReg()).addReg(ArgReg
);
3117 CLI
.OutRegs
.push_back(VA
.getLocReg());
3118 } else if (VA
.needsCustom()) {
3119 // FIXME: Handle custom args.
3122 assert(VA
.isMemLoc() && "Assuming store on stack.");
3124 // Don't emit stores for undef values.
3125 if (isa
<UndefValue
>(ArgVal
))
3128 // Need to store on the stack.
3129 unsigned ArgSize
= (ArgVT
.getSizeInBits() + 7) / 8;
3131 unsigned BEAlign
= 0;
3132 if (ArgSize
< 8 && !Subtarget
->isLittleEndian())
3133 BEAlign
= 8 - ArgSize
;
3136 Addr
.setKind(Address::RegBase
);
3137 Addr
.setReg(AArch64::SP
);
3138 Addr
.setOffset(VA
.getLocMemOffset() + BEAlign
);
3140 unsigned Alignment
= DL
.getABITypeAlignment(ArgVal
->getType());
3141 MachineMemOperand
*MMO
= FuncInfo
.MF
->getMachineMemOperand(
3142 MachinePointerInfo::getStack(*FuncInfo
.MF
, Addr
.getOffset()),
3143 MachineMemOperand::MOStore
, ArgVT
.getStoreSize(), Alignment
);
3145 if (!emitStore(ArgVT
, ArgReg
, Addr
, MMO
))
bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
                                 unsigned NumBytes) {
  CallingConv::ID CC = CLI.CallConv;

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(NumBytes).addImm(0);

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC));

    // Only handle a single return value.
    if (RVLocs.size() != 1)
      return false;

    // Copy all of the result registers out of their specified physreg.
    MVT CopyVT = RVLocs[0].getValVT();

    // TODO: Handle big-endian results
    if (CopyVT.isVector() && !Subtarget->isLittleEndian())
      return false;

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(RVLocs[0].getLocReg());
    CLI.InRegs.push_back(RVLocs[0].getLocReg());

    CLI.ResultReg = ResultReg;
    CLI.NumResultRegs = 1;
  }

  return true;
}
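// Illustrative: for a call returning a single i32 under the AArch64 calling
// convention the one CCValAssign names W0, so the code above copies W0 into a
// fresh virtual register and records it in CLI; results that need more than
// one location are rejected and left to SelectionDAG.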
3191 bool AArch64FastISel::fastLowerCall(CallLoweringInfo
&CLI
) {
3192 CallingConv::ID CC
= CLI
.CallConv
;
3193 bool IsTailCall
= CLI
.IsTailCall
;
3194 bool IsVarArg
= CLI
.IsVarArg
;
3195 const Value
*Callee
= CLI
.Callee
;
3196 MCSymbol
*Symbol
= CLI
.Symbol
;
3198 if (!Callee
&& !Symbol
)
3201 // Allow SelectionDAG isel to handle tail calls.
3205 // FIXME: we could and should support this, but for now correctness at -O0 is
3207 if (Subtarget
->isTargetILP32())
3210 CodeModel::Model CM
= TM
.getCodeModel();
3211 // Only support the small-addressing and large code models.
3212 if (CM
!= CodeModel::Large
&& !Subtarget
->useSmallAddressing())
3215 // FIXME: Add large code model support for ELF.
3216 if (CM
== CodeModel::Large
&& !Subtarget
->isTargetMachO())
3219 // Let SDISel handle vararg functions.
3223 // FIXME: Only handle *simple* calls for now.
3225 if (CLI
.RetTy
->isVoidTy())
3226 RetVT
= MVT::isVoid
;
3227 else if (!isTypeLegal(CLI
.RetTy
, RetVT
))
3230 for (auto Flag
: CLI
.OutFlags
)
3231 if (Flag
.isInReg() || Flag
.isSRet() || Flag
.isNest() || Flag
.isByVal() ||
3232 Flag
.isSwiftSelf() || Flag
.isSwiftError())
3235 // Set up the argument vectors.
3236 SmallVector
<MVT
, 16> OutVTs
;
3237 OutVTs
.reserve(CLI
.OutVals
.size());
3239 for (auto *Val
: CLI
.OutVals
) {
3241 if (!isTypeLegal(Val
->getType(), VT
) &&
3242 !(VT
== MVT::i1
|| VT
== MVT::i8
|| VT
== MVT::i16
))
3245 // We don't handle vector parameters yet.
3246 if (VT
.isVector() || VT
.getSizeInBits() > 64)
3249 OutVTs
.push_back(VT
);
3253 if (Callee
&& !computeCallAddress(Callee
, Addr
))
3256 // The weak function target may be zero; in that case we must use indirect
3257 // addressing via a stub on windows as it may be out of range for a
3258 // PC-relative jump.
3259 if (Subtarget
->isTargetWindows() && Addr
.getGlobalValue() &&
3260 Addr
.getGlobalValue()->hasExternalWeakLinkage())
3263 // Handle the arguments now that we've gotten them.
3265 if (!processCallArgs(CLI
, OutVTs
, NumBytes
))
3268 const AArch64RegisterInfo
*RegInfo
= Subtarget
->getRegisterInfo();
3269 if (RegInfo
->isAnyArgRegReserved(*MF
))
3270 RegInfo
->emitReservedArgRegCallError(*MF
);
3273 MachineInstrBuilder MIB
;
3274 if (Subtarget
->useSmallAddressing()) {
3275 const MCInstrDesc
&II
= TII
.get(Addr
.getReg() ? AArch64::BLR
: AArch64::BL
);
3276 MIB
= BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, II
);
3278 MIB
.addSym(Symbol
, 0);
3279 else if (Addr
.getGlobalValue())
3280 MIB
.addGlobalAddress(Addr
.getGlobalValue(), 0, 0);
3281 else if (Addr
.getReg()) {
3282 unsigned Reg
= constrainOperandRegClass(II
, Addr
.getReg(), 0);
3287 unsigned CallReg
= 0;
3289 unsigned ADRPReg
= createResultReg(&AArch64::GPR64commonRegClass
);
3290 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AArch64::ADRP
),
3292 .addSym(Symbol
, AArch64II::MO_GOT
| AArch64II::MO_PAGE
);
3294 CallReg
= createResultReg(&AArch64::GPR64RegClass
);
3295 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
,
3296 TII
.get(AArch64::LDRXui
), CallReg
)
3299 AArch64II::MO_GOT
| AArch64II::MO_PAGEOFF
| AArch64II::MO_NC
);
3300 } else if (Addr
.getGlobalValue())
3301 CallReg
= materializeGV(Addr
.getGlobalValue());
3302 else if (Addr
.getReg())
3303 CallReg
= Addr
.getReg();
3308 const MCInstrDesc
&II
= TII
.get(AArch64::BLR
);
3309 CallReg
= constrainOperandRegClass(II
, CallReg
, 0);
3310 MIB
= BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, II
).addReg(CallReg
);
3313 // Add implicit physical register uses to the call.
3314 for (auto Reg
: CLI
.OutRegs
)
3315 MIB
.addReg(Reg
, RegState::Implicit
);
3317 // Add a register mask with the call-preserved registers.
3318 // Proper defs for return values will be added by setPhysRegsDeadExcept().
3319 MIB
.addRegMask(TRI
.getCallPreservedMask(*FuncInfo
.MF
, CC
));
3323 // Finish off the call including any return values.
3324 return finishCall(CLI
, RetVT
, NumBytes
);
bool AArch64FastISel::isMemCpySmall(uint64_t Len, unsigned Alignment) {
  if (Alignment)
    return Len / Alignment <= 4;
  else
    return Len < 32;
}

bool AArch64FastISel::tryEmitSmallMemCpy(Address Dest, Address Src,
                                         uint64_t Len, unsigned Alignment) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!isMemCpySmall(Len, Alignment))
    return false;

  int64_t UnscaledOffset = 0;
  Address OrigDest = Dest;
  Address OrigSrc = Src;

  while (Len) {
    MVT VT;
    if (!Alignment || Alignment >= 8) {
      if (Len >= 8)
        VT = MVT::i64;
      else if (Len >= 4)
        VT = MVT::i32;
      else if (Len >= 2)
        VT = MVT::i16;
      else
        VT = MVT::i8;
    } else {
      // Bound based on alignment.
      if (Len >= 4 && Alignment == 4)
        VT = MVT::i32;
      else if (Len >= 2 && Alignment == 2)
        VT = MVT::i16;
      else
        VT = MVT::i8;
    }

    unsigned ResultReg = emitLoad(VT, VT, Src);
    if (!ResultReg)
      return false;

    if (!emitStore(VT, ResultReg, Dest))
      return false;

    int64_t Size = VT.getSizeInBits() / 8;
    Len -= Size;
    UnscaledOffset += Size;

    // We need to recompute the unscaled offset for each iteration.
    Dest.setOffset(OrigDest.getOffset() + UnscaledOffset);
    Src.setOffset(OrigSrc.getOffset() + UnscaledOffset);
  }

  return true;
}
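// Example (illustrative): a 16-byte memcpy with 8-byte-aligned operands is
// inlined as two i64 load/store pairs; with 4-byte alignment the same copy
// would take four i32 chunks, which still fits the isMemCpySmall limit of
// four chunks.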
3386 /// Check if it is possible to fold the condition from the XALU intrinsic
3387 /// into the user. The condition code will only be updated on success.
3388 bool AArch64FastISel::foldXALUIntrinsic(AArch64CC::CondCode
&CC
,
3389 const Instruction
*I
,
3390 const Value
*Cond
) {
3391 if (!isa
<ExtractValueInst
>(Cond
))
3394 const auto *EV
= cast
<ExtractValueInst
>(Cond
);
3395 if (!isa
<IntrinsicInst
>(EV
->getAggregateOperand()))
3398 const auto *II
= cast
<IntrinsicInst
>(EV
->getAggregateOperand());
3400 const Function
*Callee
= II
->getCalledFunction();
3402 cast
<StructType
>(Callee
->getReturnType())->getTypeAtIndex(0U);
3403 if (!isTypeLegal(RetTy
, RetVT
))
3406 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
3409 const Value
*LHS
= II
->getArgOperand(0);
3410 const Value
*RHS
= II
->getArgOperand(1);
3412 // Canonicalize immediate to the RHS.
3413 if (isa
<ConstantInt
>(LHS
) && !isa
<ConstantInt
>(RHS
) &&
3414 isCommutativeIntrinsic(II
))
3415 std::swap(LHS
, RHS
);
3417 // Simplify multiplies.
3418 Intrinsic::ID IID
= II
->getIntrinsicID();
3422 case Intrinsic::smul_with_overflow
:
3423 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
3424 if (C
->getValue() == 2)
3425 IID
= Intrinsic::sadd_with_overflow
;
3427 case Intrinsic::umul_with_overflow
:
3428 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
3429 if (C
->getValue() == 2)
3430 IID
= Intrinsic::uadd_with_overflow
;
3434 AArch64CC::CondCode TmpCC
;
3438 case Intrinsic::sadd_with_overflow
:
3439 case Intrinsic::ssub_with_overflow
:
3440 TmpCC
= AArch64CC::VS
;
3442 case Intrinsic::uadd_with_overflow
:
3443 TmpCC
= AArch64CC::HS
;
3445 case Intrinsic::usub_with_overflow
:
3446 TmpCC
= AArch64CC::LO
;
3448 case Intrinsic::smul_with_overflow
:
3449 case Intrinsic::umul_with_overflow
:
3450 TmpCC
= AArch64CC::NE
;
3454 // Check if both instructions are in the same basic block.
3455 if (!isValueAvailable(II
))
3458 // Make sure nothing is in the way
3459 BasicBlock::const_iterator
Start(I
);
3460 BasicBlock::const_iterator
End(II
);
3461 for (auto Itr
= std::prev(Start
); Itr
!= End
; --Itr
) {
3462 // We only expect extractvalue instructions between the intrinsic and the
3463 // instruction to be selected.
3464 if (!isa
<ExtractValueInst
>(Itr
))
3467 // Check that the extractvalue operand comes from the intrinsic.
3468 const auto *EVI
= cast
<ExtractValueInst
>(Itr
);
3469 if (EVI
->getAggregateOperand() != II
)
3477 bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst
*II
) {
3478 // FIXME: Handle more intrinsics.
3479 switch (II
->getIntrinsicID()) {
3480 default: return false;
3481 case Intrinsic::frameaddress
: {
3482 MachineFrameInfo
&MFI
= FuncInfo
.MF
->getFrameInfo();
3483 MFI
.setFrameAddressIsTaken(true);
3485 const AArch64RegisterInfo
*RegInfo
= Subtarget
->getRegisterInfo();
3486 Register FramePtr
= RegInfo
->getFrameRegister(*(FuncInfo
.MF
));
3487 Register SrcReg
= MRI
.createVirtualRegister(&AArch64::GPR64RegClass
);
3488 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
,
3489 TII
.get(TargetOpcode::COPY
), SrcReg
).addReg(FramePtr
);
3490 // Recursively load frame address
3496 unsigned Depth
= cast
<ConstantInt
>(II
->getOperand(0))->getZExtValue();
3498 DestReg
= fastEmitInst_ri(AArch64::LDRXui
, &AArch64::GPR64RegClass
,
3499 SrcReg
, /*IsKill=*/true, 0);
3500 assert(DestReg
&& "Unexpected LDR instruction emission failure.");
3504 updateValueMap(II
, SrcReg
);
3507 case Intrinsic::sponentry
: {
3508 MachineFrameInfo
&MFI
= FuncInfo
.MF
->getFrameInfo();
3510 // SP = FP + Fixed Object + 16
3511 int FI
= MFI
.CreateFixedObject(4, 0, false);
3512 unsigned ResultReg
= createResultReg(&AArch64::GPR64spRegClass
);
3513 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
,
3514 TII
.get(AArch64::ADDXri
), ResultReg
)
3519 updateValueMap(II
, ResultReg
);
3522 case Intrinsic::memcpy
:
3523 case Intrinsic::memmove
: {
3524 const auto *MTI
= cast
<MemTransferInst
>(II
);
3525 // Don't handle volatile.
3526 if (MTI
->isVolatile())
3529 // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
3530 // we would emit dead code because we don't currently handle memmoves.
3531 bool IsMemCpy
= (II
->getIntrinsicID() == Intrinsic::memcpy
);
3532 if (isa
<ConstantInt
>(MTI
->getLength()) && IsMemCpy
) {
3533 // Small memcpy's are common enough that we want to do them without a call
3535 uint64_t Len
= cast
<ConstantInt
>(MTI
->getLength())->getZExtValue();
3536 unsigned Alignment
= MinAlign(MTI
->getDestAlignment(),
3537 MTI
->getSourceAlignment());
3538 if (isMemCpySmall(Len
, Alignment
)) {
3540 if (!computeAddress(MTI
->getRawDest(), Dest
) ||
3541 !computeAddress(MTI
->getRawSource(), Src
))
3543 if (tryEmitSmallMemCpy(Dest
, Src
, Len
, Alignment
))
3548 if (!MTI
->getLength()->getType()->isIntegerTy(64))
3551 if (MTI
->getSourceAddressSpace() > 255 || MTI
->getDestAddressSpace() > 255)
3552 // Fast instruction selection doesn't support the special
3556 const char *IntrMemName
= isa
<MemCpyInst
>(II
) ? "memcpy" : "memmove";
3557 return lowerCallTo(II
, IntrMemName
, II
->getNumArgOperands() - 1);
3559 case Intrinsic::memset
: {
3560 const MemSetInst
*MSI
= cast
<MemSetInst
>(II
);
3561 // Don't handle volatile.
3562 if (MSI
->isVolatile())
3565 if (!MSI
->getLength()->getType()->isIntegerTy(64))
3568 if (MSI
->getDestAddressSpace() > 255)
3569 // Fast instruction selection doesn't support the special
3573 return lowerCallTo(II
, "memset", II
->getNumArgOperands() - 1);
3575 case Intrinsic::sin
:
3576 case Intrinsic::cos
:
3577 case Intrinsic::pow
: {
3579 if (!isTypeLegal(II
->getType(), RetVT
))
3582 if (RetVT
!= MVT::f32
&& RetVT
!= MVT::f64
)
3585 static const RTLIB::Libcall LibCallTable
[3][2] = {
3586 { RTLIB::SIN_F32
, RTLIB::SIN_F64
},
3587 { RTLIB::COS_F32
, RTLIB::COS_F64
},
3588 { RTLIB::POW_F32
, RTLIB::POW_F64
}
3591 bool Is64Bit
= RetVT
== MVT::f64
;
3592 switch (II
->getIntrinsicID()) {
3594 llvm_unreachable("Unexpected intrinsic.");
3595 case Intrinsic::sin
:
3596 LC
= LibCallTable
[0][Is64Bit
];
3598 case Intrinsic::cos
:
3599 LC
= LibCallTable
[1][Is64Bit
];
3601 case Intrinsic::pow
:
3602 LC
= LibCallTable
[2][Is64Bit
];
3607 Args
.reserve(II
->getNumArgOperands());
3609 // Populate the argument list.
3610 for (auto &Arg
: II
->arg_operands()) {
3613 Entry
.Ty
= Arg
->getType();
3614 Args
.push_back(Entry
);
3617 CallLoweringInfo CLI
;
3618 MCContext
&Ctx
= MF
->getContext();
3619 CLI
.setCallee(DL
, Ctx
, TLI
.getLibcallCallingConv(LC
), II
->getType(),
3620 TLI
.getLibcallName(LC
), std::move(Args
));
3621 if (!lowerCallTo(CLI
))
3623 updateValueMap(II
, CLI
.ResultReg
);
3626 case Intrinsic::fabs
: {
3628 if (!isTypeLegal(II
->getType(), VT
))
3632 switch (VT
.SimpleTy
) {
3636 Opc
= AArch64::FABSSr
;
3639 Opc
= AArch64::FABSDr
;
3642 unsigned SrcReg
= getRegForValue(II
->getOperand(0));
3645 bool SrcRegIsKill
= hasTrivialKill(II
->getOperand(0));
3646 unsigned ResultReg
= createResultReg(TLI
.getRegClassFor(VT
));
3647 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(Opc
), ResultReg
)
3648 .addReg(SrcReg
, getKillRegState(SrcRegIsKill
));
3649 updateValueMap(II
, ResultReg
);
3652 case Intrinsic::trap
:
3653 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AArch64::BRK
))
3656 case Intrinsic::debugtrap
: {
3657 if (Subtarget
->isTargetWindows()) {
3658 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AArch64::BRK
))
3665 case Intrinsic::sqrt
: {
3666 Type
*RetTy
= II
->getCalledFunction()->getReturnType();
3669 if (!isTypeLegal(RetTy
, VT
))
3672 unsigned Op0Reg
= getRegForValue(II
->getOperand(0));
3675 bool Op0IsKill
= hasTrivialKill(II
->getOperand(0));
3677 unsigned ResultReg
= fastEmit_r(VT
, VT
, ISD::FSQRT
, Op0Reg
, Op0IsKill
);
3681 updateValueMap(II
, ResultReg
);
3684 case Intrinsic::sadd_with_overflow
:
3685 case Intrinsic::uadd_with_overflow
:
3686 case Intrinsic::ssub_with_overflow
:
3687 case Intrinsic::usub_with_overflow
:
3688 case Intrinsic::smul_with_overflow
:
3689 case Intrinsic::umul_with_overflow
: {
3690 // This implements the basic lowering of the xalu with overflow intrinsics.
3691 const Function
*Callee
= II
->getCalledFunction();
3692 auto *Ty
= cast
<StructType
>(Callee
->getReturnType());
3693 Type
*RetTy
= Ty
->getTypeAtIndex(0U);
3696 if (!isTypeLegal(RetTy
, VT
))
3699 if (VT
!= MVT::i32
&& VT
!= MVT::i64
)
3702 const Value
*LHS
= II
->getArgOperand(0);
3703 const Value
*RHS
= II
->getArgOperand(1);
3704 // Canonicalize immediate to the RHS.
3705 if (isa
<ConstantInt
>(LHS
) && !isa
<ConstantInt
>(RHS
) &&
3706 isCommutativeIntrinsic(II
))
3707 std::swap(LHS
, RHS
);
3709 // Simplify multiplies.
3710 Intrinsic::ID IID
= II
->getIntrinsicID();
3714 case Intrinsic::smul_with_overflow
:
3715 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
3716 if (C
->getValue() == 2) {
3717 IID
= Intrinsic::sadd_with_overflow
;
3721 case Intrinsic::umul_with_overflow
:
3722 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
3723 if (C
->getValue() == 2) {
3724 IID
= Intrinsic::uadd_with_overflow
;
3730 unsigned ResultReg1
= 0, ResultReg2
= 0, MulReg
= 0;
3731 AArch64CC::CondCode CC
= AArch64CC::Invalid
;
3733 default: llvm_unreachable("Unexpected intrinsic!");
3734 case Intrinsic::sadd_with_overflow
:
3735 ResultReg1
= emitAdd(VT
, LHS
, RHS
, /*SetFlags=*/true);
3738 case Intrinsic::uadd_with_overflow
:
3739 ResultReg1
= emitAdd(VT
, LHS
, RHS
, /*SetFlags=*/true);
3742 case Intrinsic::ssub_with_overflow
:
3743 ResultReg1
= emitSub(VT
, LHS
, RHS
, /*SetFlags=*/true);
3746 case Intrinsic::usub_with_overflow
:
3747 ResultReg1
= emitSub(VT
, LHS
, RHS
, /*SetFlags=*/true);
3750 case Intrinsic::smul_with_overflow
: {
3752 unsigned LHSReg
= getRegForValue(LHS
);
3755 bool LHSIsKill
= hasTrivialKill(LHS
);
3757 unsigned RHSReg
= getRegForValue(RHS
);
3760 bool RHSIsKill
= hasTrivialKill(RHS
);
3762 if (VT
== MVT::i32
) {
3763 MulReg
= emitSMULL_rr(MVT::i64
, LHSReg
, LHSIsKill
, RHSReg
, RHSIsKill
);
3764 unsigned ShiftReg
= emitLSR_ri(MVT::i64
, MVT::i64
, MulReg
,
3765 /*IsKill=*/false, 32);
3766 MulReg
= fastEmitInst_extractsubreg(VT
, MulReg
, /*IsKill=*/true,
3768 ShiftReg
= fastEmitInst_extractsubreg(VT
, ShiftReg
, /*IsKill=*/true,
3770 emitSubs_rs(VT
, ShiftReg
, /*IsKill=*/true, MulReg
, /*IsKill=*/false,
3771 AArch64_AM::ASR
, 31, /*WantResult=*/false);
3773 assert(VT
== MVT::i64
&& "Unexpected value type.");
3774 // LHSReg and RHSReg cannot be killed by this Mul, since they are
3775 // reused in the next instruction.
3776 MulReg
= emitMul_rr(VT
, LHSReg
, /*IsKill=*/false, RHSReg
,
3778 unsigned SMULHReg
= fastEmit_rr(VT
, VT
, ISD::MULHS
, LHSReg
, LHSIsKill
,
3780 emitSubs_rs(VT
, SMULHReg
, /*IsKill=*/true, MulReg
, /*IsKill=*/false,
3781 AArch64_AM::ASR
, 63, /*WantResult=*/false);
3785 case Intrinsic::umul_with_overflow
: {
3787 unsigned LHSReg
= getRegForValue(LHS
);
3790 bool LHSIsKill
= hasTrivialKill(LHS
);
3792 unsigned RHSReg
= getRegForValue(RHS
);
3795 bool RHSIsKill
= hasTrivialKill(RHS
);
3797 if (VT
== MVT::i32
) {
3798 MulReg
= emitUMULL_rr(MVT::i64
, LHSReg
, LHSIsKill
, RHSReg
, RHSIsKill
);
3799 emitSubs_rs(MVT::i64
, AArch64::XZR
, /*IsKill=*/true, MulReg
,
3800 /*IsKill=*/false, AArch64_AM::LSR
, 32,
3801 /*WantResult=*/false);
3802 MulReg
= fastEmitInst_extractsubreg(VT
, MulReg
, /*IsKill=*/true,
3805 assert(VT
== MVT::i64
&& "Unexpected value type.");
3806 // LHSReg and RHSReg cannot be killed by this Mul, since they are
3807 // reused in the next instruction.
3808 MulReg
= emitMul_rr(VT
, LHSReg
, /*IsKill=*/false, RHSReg
,
3810 unsigned UMULHReg
= fastEmit_rr(VT
, VT
, ISD::MULHU
, LHSReg
, LHSIsKill
,
3812 emitSubs_rr(VT
, AArch64::XZR
, /*IsKill=*/true, UMULHReg
,
3813 /*IsKill=*/false, /*WantResult=*/false);
3820 ResultReg1
= createResultReg(TLI
.getRegClassFor(VT
));
3821 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
,
3822 TII
.get(TargetOpcode::COPY
), ResultReg1
).addReg(MulReg
);
3828 ResultReg2
= fastEmitInst_rri(AArch64::CSINCWr
, &AArch64::GPR32RegClass
,
3829 AArch64::WZR
, /*IsKill=*/true, AArch64::WZR
,
3830 /*IsKill=*/true, getInvertedCondCode(CC
));
3832 assert((ResultReg1
+ 1) == ResultReg2
&&
3833 "Nonconsecutive result registers.");
3834 updateValueMap(II
, ResultReg1
, 2);
bool AArch64FastISel::selectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (TLI.supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return false;

  if (TLI.supportSplitCSR(FuncInfo.MF))
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  if (Ret->getNumOperands() > 0) {
    CallingConv::ID CC = F.getCallingConv();
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
    CCAssignFn *RetCC = CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
                                                     : RetCC_AArch64_AAPCS;
    CCInfo.AnalyzeReturn(Outs, RetCC);

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];
    const Value *RV = Ret->getOperand(0);

    // Don't bother handling odd stuff for now.
    if ((VA.getLocInfo() != CCValAssign::Full) &&
        (VA.getLocInfo() != CCValAssign::BCvt))
      return false;

    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    Register DestReg = VA.getLocReg();
    // Avoid a cross-class copy. This is very unlikely.
    if (!MRI.getRegClass(SrcReg)->contains(DestReg))
      return false;

    EVT RVEVT = TLI.getValueType(DL, RV->getType());
    if (!RVEVT.isSimple())
      return false;

    // Vectors (of > 1 lane) in big endian need tricky handling.
    if (RVEVT.isVector() && RVEVT.getVectorNumElements() > 1 &&
        !Subtarget->isLittleEndian())
      return false;

    MVT RVVT = RVEVT.getSimpleVT();
    if (RVVT == MVT::f128)
      return false;

    MVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      bool IsZExt = Outs[0].Flags.isZExt();
      SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
      if (SrcReg == 0)
        return false;
    }

    // "Callee" (i.e. value producer) zero extends pointers at function
    // boundary.
    if (Subtarget->isTargetILP32() && RV->getType()->isPointerTy())
      SrcReg = emitAnd_ri(MVT::i64, SrcReg, false, 0xffffffff);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(AArch64::RET_ReallyLR));
  for (unsigned RetReg : RetRegs)
    MIB.addReg(RetReg, RegState::Implicit);
  return true;
}
bool AArch64FastISel::selectTrunc(const Instruction *I) {
  Type *DestTy = I->getType();
  Value *Op = I->getOperand(0);
  Type *SrcTy = Op->getType();

  EVT SrcEVT = TLI.getValueType(DL, SrcTy, true);
  EVT DestEVT = TLI.getValueType(DL, DestTy, true);
  if (!SrcEVT.isSimple())
    return false;
  if (!DestEVT.isSimple())
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();

  if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 &&
      SrcVT != MVT::i8)
    return false;

  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8 &&
      DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg)
    return false;
  bool SrcIsKill = hasTrivialKill(Op);

  // If we're truncating from i64 to a smaller non-legal type then generate an
  // AND. Otherwise, we know the high bits are undefined and a truncate only
  // generate a COPY. We cannot mark the source register also as result
  // register, because this can incorrectly transfer the kill flag onto the
  // source register.
  unsigned ResultReg;
  if (SrcVT == MVT::i64) {
    uint64_t Mask = 0;
    switch (DestVT.SimpleTy) {
    default:
      // Trunc i64 to i32 is handled by the target-independent fast-isel.
      return false;
    case MVT::i1:  Mask = 0x1;    break;
    case MVT::i8:  Mask = 0xff;   break;
    case MVT::i16: Mask = 0xffff; break;
    }
    // Issue an extract_subreg to get the lower 32-bits.
    unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
                                                AArch64::sub_32);
    // Create the AND instruction which performs the actual truncation.
    ResultReg = emitAnd_ri(MVT::i32, Reg32, /*IsKill=*/true, Mask);
    assert(ResultReg && "Unexpected AND instruction emission failure.");
  } else {
    ResultReg = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(SrcReg, getKillRegState(SrcIsKill));
  }

  updateValueMap(I, ResultReg);
  return true;
}
unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) {
  assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 ||
          DestVT == MVT::i64) &&
         "Unexpected value type.");
  // Handle i8 and i16 as i32.
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
    DestVT = MVT::i32;

  if (IsZExt) {
    unsigned ResultReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1);
    assert(ResultReg && "Unexpected AND instruction emission failure.");
    if (DestVT == MVT::i64) {
      // We're ZExt i1 to i64. The ANDWri Wd, Ws, #1 implicitly clears the
      // upper 32 bits. Emit a SUBREG_TO_REG to extend from Wd to Xd.
      Register Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(AArch64::SUBREG_TO_REG), Reg64)
          .addImm(0)
          .addReg(ResultReg)
          .addImm(AArch64::sub_32);
      ResultReg = Reg64;
    }
    return ResultReg;
  } else {
    if (DestVT == MVT::i64) {
      // FIXME: We're SExt i1 to i64.
      return 0;
    }
    return fastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
                            /*TODO:IsKill=*/false, 0, 0);
  }
}
unsigned AArch64FastISel::emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill) {
  unsigned Opc, ZReg;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    RetVT = MVT::i32;
    Opc = AArch64::MADDWrrr; ZReg = AArch64::WZR; break;
  case MVT::i64:
    Opc = AArch64::MADDXrrr; ZReg = AArch64::XZR; break;
  }

  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  return fastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill,
                          ZReg, /*IsKill=*/true);
}
unsigned AArch64FastISel::emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill) {
  if (RetVT != MVT::i64)
    return 0;

  return fastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
                          Op0, Op0IsKill, Op1, Op1IsKill,
                          AArch64::XZR, /*IsKill=*/true);
}
unsigned AArch64FastISel::emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill) {
  if (RetVT != MVT::i64)
    return 0;

  return fastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
                          Op0, Op0IsKill, Op1, Op1IsKill,
                          AArch64::XZR, /*IsKill=*/true);
}
unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
                                     unsigned Op1Reg, bool Op1IsKill) {
  unsigned Opc = 0;
  bool NeedTrunc = false;
  uint64_t Mask = 0;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:  Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::LSLVWr;                                  break;
  case MVT::i64: Opc = AArch64::LSLVXr;                                  break;
  }

  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (NeedTrunc) {
    Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
    Op1IsKill = true;
  }
  unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
                                       Op1IsKill);
  if (NeedTrunc)
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
  return ResultReg;
}
unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                     bool Op0IsKill, uint64_t Shift,
                                     bool IsZExt) {
  assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) && "Unexpected return value type.");

  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
  unsigned DstBits = RetVT.getSizeInBits();
  unsigned SrcBits = SrcVT.getSizeInBits();
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

  // Just emit a copy for "zero" shifts.
  if (Shift == 0) {
    if (RetVT == SrcVT) {
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(Op0, getKillRegState(Op0IsKill));
      return ResultReg;
    } else
      return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  }

  // Don't deal with undefined shifts.
  if (Shift >= DstBits)
    return 0;

  // For immediate shifts we can fold the zero-/sign-extension into the shift.
  // {S|U}BFM Wd, Wn, #r, #s
  // Wd<32+s-r,32-r> = Wn<s:0> when r > s
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = shl i16 %1, 4
  // Wd<32+7-28,32-28> = Wn<7:0> <- clamp s to 7
  // 0b1111_1111_1111_1111__1111_1010_1010_0000 sext
  // 0b0000_0000_0000_0000__0000_0101_0101_0000 sext | zext
  // 0b0000_0000_0000_0000__0000_1010_1010_0000 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = shl i16 %1, 8
  // Wd<32+7-24,32-24> = Wn<7:0>
  // 0b1111_1111_1111_1111__1010_1010_0000_0000 sext
  // 0b0000_0000_0000_0000__0101_0101_0000_0000 sext | zext
  // 0b0000_0000_0000_0000__1010_1010_0000_0000 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = shl i16 %1, 12
  // Wd<32+3-20,32-20> = Wn<3:0>
  // 0b1111_1111_1111_1111__1010_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0101_0000_0000_0000 sext | zext
  // 0b0000_0000_0000_0000__1010_0000_0000_0000 zext

  unsigned ImmR = RegSize - Shift;
  // Limit the width to the length of the source type.
  unsigned ImmS = std::min<unsigned>(SrcBits - 1, DstBits - 1 - Shift);
  static const unsigned OpcTable[2][2] = {
    {AArch64::SBFMWri, AArch64::SBFMXri},
    {AArch64::UBFMWri, AArch64::UBFMXri}
  };
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    Register TmpReg = MRI.createVirtualRegister(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(0)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(AArch64::sub_32);
    Op0 = TmpReg;
    Op0IsKill = true;
  }
  return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
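// Worked example for the ImmR/ImmS computation above (illustrative only):
// for a zext i8 operand shifted left by 4 into an i32 result, RegSize is 32,
// so ImmR = 32 - 4 = 28 and ImmS = min(8 - 1, 32 - 1 - 4) = 7, which selects
// UBFMWri Wd, Wn, #28, #7 and folds the zero-extend into the shift.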
unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
                                     unsigned Op1Reg, bool Op1IsKill) {
  unsigned Opc = 0;
  bool NeedTrunc = false;
  uint64_t Mask = 0;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:  Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::LSRVWr;                                  break;
  case MVT::i64: Opc = AArch64::LSRVXr;                                  break;
  }

  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (NeedTrunc) {
    Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Op0IsKill, Mask);
    Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
    Op0IsKill = Op1IsKill = true;
  }
  unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
                                       Op1IsKill);
  if (NeedTrunc)
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
  return ResultReg;
}
unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                     bool Op0IsKill, uint64_t Shift,
                                     bool IsZExt) {
  assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) && "Unexpected return value type.");

  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
  unsigned DstBits = RetVT.getSizeInBits();
  unsigned SrcBits = SrcVT.getSizeInBits();
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

  // Just emit a copy for "zero" shifts.
  if (Shift == 0) {
    if (RetVT == SrcVT) {
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(Op0, getKillRegState(Op0IsKill));
      return ResultReg;
    } else
      return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  }

  // Don't deal with undefined shifts.
  if (Shift >= DstBits)
    return 0;

  // For immediate shifts we can fold the zero-/sign-extension into the shift.
  // {S|U}BFM Wd, Wn, #r, #s
  // Wd<s-r:0> = Wn<s:r> when r <= s
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = lshr i16 %1, 4
  // Wd<7-4:0> = Wn<7:4>
  // 0b0000_0000_0000_0000__0000_1111_1111_1010 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0101 sext | zext
  // 0b0000_0000_0000_0000__0000_0000_0000_1010 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = lshr i16 %1, 8
  // Wd<7-7,0> = Wn<7:7>
  // 0b0000_0000_0000_0000__0000_0000_1111_1111 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = lshr i16 %1, 12
  // Wd<7-7,0> = Wn<7:7> <- clamp r to 7
  // 0b0000_0000_0000_0000__0000_0000_0000_1111 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext

  if (Shift >= SrcBits && IsZExt)
    return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);

  // It is not possible to fold a sign-extend into the LShr instruction. In
  // this case emit a sign-extend.
  if (!IsZExt) {
    Op0 = emitIntExt(SrcVT, Op0, RetVT, IsZExt);
    if (!Op0)
      return 0;
    Op0IsKill = true;
    SrcVT = RetVT;
    SrcBits = SrcVT.getSizeInBits();
    IsZExt = true;
  }

  unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
  unsigned ImmS = SrcBits - 1;
  static const unsigned OpcTable[2][2] = {
    {AArch64::SBFMWri, AArch64::SBFMXri},
    {AArch64::UBFMWri, AArch64::UBFMXri}
  };
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    Register TmpReg = MRI.createVirtualRegister(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(0)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(AArch64::sub_32);
    Op0 = TmpReg;
    Op0IsKill = true;
  }
  return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
                                     unsigned Op1Reg, bool Op1IsKill) {
  unsigned Opc = 0;
  bool NeedTrunc = false;
  uint64_t Mask = 0;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:  Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::ASRVWr;                                  break;
  case MVT::i64: Opc = AArch64::ASRVXr;                                  break;
  }

  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (NeedTrunc) {
    Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, /*isZExt=*/false);
    Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
    Op0IsKill = Op1IsKill = true;
  }
  unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
                                       Op1IsKill);
  if (NeedTrunc)
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
  return ResultReg;
}
unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                     bool Op0IsKill, uint64_t Shift,
                                     bool IsZExt) {
  assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) && "Unexpected return value type.");

  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
  unsigned DstBits = RetVT.getSizeInBits();
  unsigned SrcBits = SrcVT.getSizeInBits();
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

  // Just emit a copy for "zero" shifts.
  if (Shift == 0) {
    if (RetVT == SrcVT) {
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(Op0, getKillRegState(Op0IsKill));
      return ResultReg;
    } else
      return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  }

  // Don't deal with undefined shifts.
  if (Shift >= DstBits)
    return 0;

  // For immediate shifts we can fold the zero-/sign-extension into the shift.
  // {S|U}BFM Wd, Wn, #r, #s
  // Wd<s-r:0> = Wn<s:r> when r <= s
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = ashr i16 %1, 4
  // Wd<7-4:0> = Wn<7:4>
  // 0b1111_1111_1111_1111__1111_1111_1111_1010 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0101 sext | zext
  // 0b0000_0000_0000_0000__0000_0000_0000_1010 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = ashr i16 %1, 8
  // Wd<7-7,0> = Wn<7:7>
  // 0b1111_1111_1111_1111__1111_1111_1111_1111 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = ashr i16 %1, 12
  // Wd<7-7,0> = Wn<7:7> <- clamp r to 7
  // 0b1111_1111_1111_1111__1111_1111_1111_1111 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext

  if (Shift >= SrcBits && IsZExt)
    return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);

  unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
  unsigned ImmS = SrcBits - 1;
  static const unsigned OpcTable[2][2] = {
    {AArch64::SBFMWri, AArch64::SBFMXri},
    {AArch64::UBFMWri, AArch64::UBFMXri}
  };
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    Register TmpReg = MRI.createVirtualRegister(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(0)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(AArch64::sub_32);
    Op0 = TmpReg;
    Op0IsKill = true;
  }
  return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                     bool IsZExt) {
  assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?");

  // FastISel does not have plumbing to deal with extensions where the SrcVT or
  // DestVT are odd things, so test to make sure that they are both types we can
  // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
  // bail out to SelectionDAG.
  if (((DestVT != MVT::i8) && (DestVT != MVT::i16) &&
       (DestVT != MVT::i32) && (DestVT != MVT::i64)) ||
      ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) &&
       (SrcVT != MVT::i16) && (SrcVT != MVT::i32)))
    return 0;

  unsigned Opc;
  unsigned Imm = 0;

  switch (SrcVT.SimpleTy) {
  default:
    return 0;
  case MVT::i1:
    return emiti1Ext(SrcReg, DestVT, IsZExt);
  case MVT::i8:
    if (DestVT == MVT::i64)
      Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    else
      Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    Imm = 7;
    break;
  case MVT::i16:
    if (DestVT == MVT::i64)
      Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    else
      Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    Imm = 15;
    break;
  case MVT::i32:
    assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?");
    Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    Imm = 31;
    break;
  }

  // Handle i8 and i16 as i32.
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
    DestVT = MVT::i32;
  else if (DestVT == MVT::i64) {
    Register Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), Src64)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(AArch64::sub_32);
    SrcReg = Src64;
  }

  const TargetRegisterClass *RC =
      (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  return fastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm);
}
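// Illustrative mapping (not exhaustive): a zext i8 -> i32 request above
// becomes UBFMWri Wd, Wn, #0, #7 (uxtb), and a sext i16 -> i64 request
// becomes SBFMXri Xd, Xn, #0, #15 (sxth) after the source is widened with
// SUBREG_TO_REG, following the Opc/Imm selection in this function.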
static bool isZExtLoad(const MachineInstr *LI) {
  switch (LI->getOpcode()) {
  default:
    return false;
  case AArch64::LDURBBi:
  case AArch64::LDURHHi:
  case AArch64::LDURWi:
  case AArch64::LDRBBui:
  case AArch64::LDRHHui:
  case AArch64::LDRWui:
  case AArch64::LDRBBroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRWroX:
  case AArch64::LDRBBroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRWroW:
    return true;
  }
}
static bool isSExtLoad(const MachineInstr *LI) {
  switch (LI->getOpcode()) {
  default:
    return false;
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSWi:
  case AArch64::LDRSBWui:
  case AArch64::LDRSHWui:
  case AArch64::LDRSBXui:
  case AArch64::LDRSHXui:
  case AArch64::LDRSWui:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
    return true;
  }
}
bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
                                         MVT SrcVT) {
  const auto *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI || !LI->hasOneUse())
    return false;

  // Check if the load instruction has already been selected.
  unsigned Reg = lookUpRegForValue(LI);
  if (!Reg)
    return false;

  MachineInstr *MI = MRI.getUniqueVRegDef(Reg);
  if (!MI)
    return false;

  // Check if the correct load instruction has been emitted - SelectionDAG might
  // have emitted a zero-extending load, but we need a sign-extending load.
  bool IsZExt = isa<ZExtInst>(I);
  const auto *LoadMI = MI;
  if (LoadMI->getOpcode() == TargetOpcode::COPY &&
      LoadMI->getOperand(1).getSubReg() == AArch64::sub_32) {
    Register LoadReg = MI->getOperand(1).getReg();
    LoadMI = MRI.getUniqueVRegDef(LoadReg);
    assert(LoadMI && "Expected valid instruction");
  }
  if (!(IsZExt && isZExtLoad(LoadMI)) && !(!IsZExt && isSExtLoad(LoadMI)))
    return false;

  // Nothing to be done.
  if (RetVT != MVT::i64 || SrcVT > MVT::i32) {
    updateValueMap(I, Reg);
    return true;
  }

  if (IsZExt) {
    unsigned Reg64 = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), Reg64)
        .addImm(0)
        .addReg(Reg, getKillRegState(true))
        .addImm(AArch64::sub_32);
    Reg = Reg64;
  } else {
    assert((MI->getOpcode() == TargetOpcode::COPY &&
            MI->getOperand(1).getSubReg() == AArch64::sub_32) &&
           "Expected copy instruction");
    Reg = MI->getOperand(1).getReg();
    MachineBasicBlock::iterator I(MI);
    removeDeadCode(I, std::next(I));
  }
  updateValueMap(I, Reg);
  return true;
}
bool AArch64FastISel::selectIntExt(const Instruction *I) {
  assert((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
         "Unexpected integer extend instruction.");
  MVT RetVT;
  MVT SrcVT;
  if (!isTypeSupported(I->getType(), RetVT))
    return false;

  if (!isTypeSupported(I->getOperand(0)->getType(), SrcVT))
    return false;

  // Try to optimize already sign-/zero-extended values from load instructions.
  if (optimizeIntExtLoad(I, RetVT, SrcVT))
    return true;

  unsigned SrcReg = getRegForValue(I->getOperand(0));
  if (!SrcReg)
    return false;
  bool SrcIsKill = hasTrivialKill(I->getOperand(0));

  // Try to optimize already sign-/zero-extended values from function arguments.
  bool IsZExt = isa<ZExtInst>(I);
  if (const auto *Arg = dyn_cast<Argument>(I->getOperand(0))) {
    if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr())) {
      if (RetVT == MVT::i64 && SrcVT != MVT::i64) {
        unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(AArch64::SUBREG_TO_REG), ResultReg)
            .addImm(0)
            .addReg(SrcReg, getKillRegState(SrcIsKill))
            .addImm(AArch64::sub_32);
        SrcReg = ResultReg;
      }
      // Conservatively clear all kill flags from all uses, because we are
      // replacing a sign-/zero-extend instruction at IR level with a nop at MI
      // level. The result of the instruction at IR level might have been
      // trivially dead, which is no longer true.
      unsigned UseReg = lookUpRegForValue(I);
      if (UseReg)
        MRI.clearKillFlags(UseReg);

      updateValueMap(I, SrcReg);
      return true;
    }
  }

  unsigned ResultReg = emitIntExt(SrcVT, SrcReg, RetVT, IsZExt);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}
bool AArch64FastISel::selectRem(const Instruction *I, unsigned ISDOpcode) {
  EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
  if (!DestEVT.isSimple())
    return false;

  MVT DestVT = DestEVT.getSimpleVT();
  if (DestVT != MVT::i64 && DestVT != MVT::i32)
    return false;

  unsigned DivOpc;
  bool Is64bit = (DestVT == MVT::i64);
  switch (ISDOpcode) {
  default:
    return false;
  case ISD::SREM:
    DivOpc = Is64bit ? AArch64::SDIVXr : AArch64::SDIVWr;
    break;
  case ISD::UREM:
    DivOpc = Is64bit ? AArch64::UDIVXr : AArch64::UDIVWr;
    break;
  }
  unsigned MSubOpc = Is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
  unsigned Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;
  bool Src0IsKill = hasTrivialKill(I->getOperand(0));

  unsigned Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src1Reg)
    return false;
  bool Src1IsKill = hasTrivialKill(I->getOperand(1));

  const TargetRegisterClass *RC =
      (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  unsigned QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false,
                                     Src1Reg, /*IsKill=*/false);
  assert(QuotReg && "Unexpected DIV instruction emission failure.");
  // The remainder is computed as numerator - (quotient * denominator) using the
  // MSUB instruction.
  unsigned ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
                                        Src1Reg, Src1IsKill, Src0Reg,
                                        Src0IsKill);
  updateValueMap(I, ResultReg);
  return true;
}
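// Rough shape of the emitted code for a 32-bit srem (sketch, register names
// are placeholders):
//   sdiv w8, w0, w1        ; quotient
//   msub w0, w8, w1, w0    ; remainder = w0 - (w8 * w1)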
bool AArch64FastISel::selectMul(const Instruction *I) {
  MVT VT;
  if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
    return false;

  if (VT.isVector())
    return selectBinaryOp(I, ISD::MUL);

  const Value *Src0 = I->getOperand(0);
  const Value *Src1 = I->getOperand(1);
  if (const auto *C = dyn_cast<ConstantInt>(Src0))
    if (C->getValue().isPowerOf2())
      std::swap(Src0, Src1);

  // Try to simplify to a shift instruction.
  if (const auto *C = dyn_cast<ConstantInt>(Src1))
    if (C->getValue().isPowerOf2()) {
      uint64_t ShiftVal = C->getValue().logBase2();
      MVT SrcVT = VT;
      bool IsZExt = true;
      if (const auto *ZExt = dyn_cast<ZExtInst>(Src0)) {
        if (!isIntExtFree(ZExt)) {
          MVT VT;
          if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), VT)) {
            SrcVT = VT;
            IsZExt = true;
            Src0 = ZExt->getOperand(0);
          }
        }
      } else if (const auto *SExt = dyn_cast<SExtInst>(Src0)) {
        if (!isIntExtFree(SExt)) {
          MVT VT;
          if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), VT)) {
            SrcVT = VT;
            IsZExt = false;
            Src0 = SExt->getOperand(0);
          }
        }
      }

      unsigned Src0Reg = getRegForValue(Src0);
      if (!Src0Reg)
        return false;
      bool Src0IsKill = hasTrivialKill(Src0);

      unsigned ResultReg =
          emitLSL_ri(VT, SrcVT, Src0Reg, Src0IsKill, ShiftVal, IsZExt);

      if (ResultReg) {
        updateValueMap(I, ResultReg);
        return true;
      }
    }

  unsigned Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;
  bool Src0IsKill = hasTrivialKill(I->getOperand(0));

  unsigned Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src1Reg)
    return false;
  bool Src1IsKill = hasTrivialKill(I->getOperand(1));

  unsigned ResultReg = emitMul_rr(VT, Src0Reg, Src0IsKill, Src1Reg, Src1IsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}
bool AArch64FastISel::selectShift(const Instruction *I) {
  MVT RetVT;
  if (!isTypeSupported(I->getType(), RetVT, /*IsVectorAllowed=*/true))
    return false;

  if (RetVT.isVector())
    return selectOperator(I, I->getOpcode());

  if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = 0;
    uint64_t ShiftVal = C->getZExtValue();
    MVT SrcVT = RetVT;
    bool IsZExt = I->getOpcode() != Instruction::AShr;
    const Value *Op0 = I->getOperand(0);
    if (const auto *ZExt = dyn_cast<ZExtInst>(Op0)) {
      if (!isIntExtFree(ZExt)) {
        MVT TmpVT;
        if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), TmpVT)) {
          SrcVT = TmpVT;
          IsZExt = true;
          Op0 = ZExt->getOperand(0);
        }
      }
    } else if (const auto *SExt = dyn_cast<SExtInst>(Op0)) {
      if (!isIntExtFree(SExt)) {
        MVT TmpVT;
        if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), TmpVT)) {
          SrcVT = TmpVT;
          IsZExt = false;
          Op0 = SExt->getOperand(0);
        }
      }
    }

    unsigned Op0Reg = getRegForValue(Op0);
    if (!Op0Reg)
      return false;
    bool Op0IsKill = hasTrivialKill(Op0);

    switch (I->getOpcode()) {
    default: llvm_unreachable("Unexpected instruction.");
    case Instruction::Shl:
      ResultReg = emitLSL_ri(RetVT, SrcVT, Op0Reg, Op0IsKill, ShiftVal, IsZExt);
      break;
    case Instruction::AShr:
      ResultReg = emitASR_ri(RetVT, SrcVT, Op0Reg, Op0IsKill, ShiftVal, IsZExt);
      break;
    case Instruction::LShr:
      ResultReg = emitLSR_ri(RetVT, SrcVT, Op0Reg, Op0IsKill, ShiftVal, IsZExt);
      break;
    }
    if (!ResultReg)
      return false;

    updateValueMap(I, ResultReg);
    return true;
  }

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (!Op0Reg)
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (!Op1Reg)
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  unsigned ResultReg = 0;
  switch (I->getOpcode()) {
  default: llvm_unreachable("Unexpected instruction.");
  case Instruction::Shl:
    ResultReg = emitLSL_rr(RetVT, Op0Reg, Op0IsKill, Op1Reg, Op1IsKill);
    break;
  case Instruction::AShr:
    ResultReg = emitASR_rr(RetVT, Op0Reg, Op0IsKill, Op1Reg, Op1IsKill);
    break;
  case Instruction::LShr:
    ResultReg = emitLSR_rr(RetVT, Op0Reg, Op0IsKill, Op1Reg, Op1IsKill);
    break;
  }

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}
bool AArch64FastISel::selectBitCast(const Instruction *I) {
  MVT RetVT;
  MVT SrcVT;

  if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT))
    return false;
  if (!isTypeLegal(I->getType(), RetVT))
    return false;

  unsigned Opc;
  if (RetVT == MVT::f32 && SrcVT == MVT::i32)
    Opc = AArch64::FMOVWSr;
  else if (RetVT == MVT::f64 && SrcVT == MVT::i64)
    Opc = AArch64::FMOVXDr;
  else if (RetVT == MVT::i32 && SrcVT == MVT::f32)
    Opc = AArch64::FMOVSWr;
  else if (RetVT == MVT::i64 && SrcVT == MVT::f64)
    Opc = AArch64::FMOVDXr;
  else
    return false;

  const TargetRegisterClass *RC = nullptr;
  switch (RetVT.SimpleTy) {
  default: llvm_unreachable("Unexpected value type.");
  case MVT::i32: RC = &AArch64::GPR32RegClass; break;
  case MVT::i64: RC = &AArch64::GPR64RegClass; break;
  case MVT::f32: RC = &AArch64::FPR32RegClass; break;
  case MVT::f64: RC = &AArch64::FPR64RegClass; break;
  }
  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (!Op0Reg)
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
  unsigned ResultReg = fastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}
bool AArch64FastISel::selectFRem(const Instruction *I) {
  MVT RetVT;
  if (!isTypeLegal(I->getType(), RetVT))
    return false;

  RTLIB::Libcall LC;
  switch (RetVT.SimpleTy) {
  default:
    return false;
  case MVT::f32:
    LC = RTLIB::REM_F32;
    break;
  case MVT::f64:
    LC = RTLIB::REM_F64;
    break;
  }

  ArgListTy Args;
  Args.reserve(I->getNumOperands());

  // Populate the argument list.
  for (auto &Arg : I->operands()) {
    ArgListEntry Entry;
    Entry.Val = Arg;
    Entry.Ty = Arg->getType();
    Args.push_back(Entry);
  }

  CallLoweringInfo CLI;
  MCContext &Ctx = MF->getContext();
  CLI.setCallee(DL, Ctx, TLI.getLibcallCallingConv(LC), I->getType(),
                TLI.getLibcallName(LC), std::move(Args));
  if (!lowerCallTo(CLI))
    return false;
  updateValueMap(I, CLI.ResultReg);
  return true;
}
bool AArch64FastISel::selectSDiv(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  if (!isa<ConstantInt>(I->getOperand(1)))
    return selectBinaryOp(I, ISD::SDIV);

  const APInt &C = cast<ConstantInt>(I->getOperand(1))->getValue();
  if ((VT != MVT::i32 && VT != MVT::i64) || !C ||
      !(C.isPowerOf2() || (-C).isPowerOf2()))
    return selectBinaryOp(I, ISD::SDIV);

  unsigned Lg2 = C.countTrailingZeros();
  unsigned Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;
  bool Src0IsKill = hasTrivialKill(I->getOperand(0));

  if (cast<BinaryOperator>(I)->isExact()) {
    unsigned ResultReg = emitASR_ri(VT, VT, Src0Reg, Src0IsKill, Lg2);
    if (!ResultReg)
      return false;
    updateValueMap(I, ResultReg);
    return true;
  }

  int64_t Pow2MinusOne = (1ULL << Lg2) - 1;
  unsigned AddReg = emitAdd_ri_(VT, Src0Reg, /*IsKill=*/false, Pow2MinusOne);
  if (!AddReg)
    return false;

  // (Src0 < 0) ? Pow2 - 1 : 0;
  if (!emitICmp_ri(VT, Src0Reg, /*IsKill=*/false, 0))
    return false;

  unsigned SelectOpc;
  const TargetRegisterClass *RC;
  if (VT == MVT::i64) {
    SelectOpc = AArch64::CSELXr;
    RC = &AArch64::GPR64RegClass;
  } else {
    SelectOpc = AArch64::CSELWr;
    RC = &AArch64::GPR32RegClass;
  }
  unsigned SelectReg =
      fastEmitInst_rri(SelectOpc, RC, AddReg, /*IsKill=*/true, Src0Reg,
                       Src0IsKill, AArch64CC::LT);
  if (!SelectReg)
    return false;

  // Divide by Pow2 --> ashr. If we're dividing by a negative value we must also
  // negate the result.
  unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
  unsigned ResultReg;
  if (C.isNegative())
    ResultReg = emitAddSub_rs(/*UseAdd=*/false, VT, ZeroReg, /*IsKill=*/true,
                              SelectReg, /*IsKill=*/true, AArch64_AM::ASR, Lg2);
  else
    ResultReg = emitASR_ri(VT, VT, SelectReg, /*IsKill=*/true, Lg2);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}
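// Sketch of the non-exact power-of-two path above for "sdiv i32 %x, 8"
// (register names are placeholders):
//   add  w8, w0, #7            ; Src0 + (Pow2 - 1)
//   cmp  w0, #0
//   csel w8, w8, w0, lt        ; keep the biased value only for negative Src0
//   asr  w0, w8, #3            ; shift (negated instead when C is negative)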
/// This is mostly a copy of the existing FastISel getRegForGEPIndex code. We
/// have to duplicate it for AArch64, because otherwise we would fail during the
/// sign-extend emission.
std::pair<unsigned, bool> AArch64FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = emitIntExt(IdxVT.getSimpleVT(), IdxN, PtrVT, /*isZExt=*/false);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT))
    llvm_unreachable("AArch64 FastISel doesn't support types larger than i64");
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
/// This is mostly a copy of the existing FastISel GEP code, but we have to
/// duplicate it for AArch64, because otherwise we would bail out even for
/// simple cases. This is because the standard fastEmit functions don't cover
/// MUL at all and ADD is lowered very inefficiently.
bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
  if (Subtarget->isTargetILP32())
    return false;

  unsigned N = getRegForValue(I->getOperand(0));
  if (!N)
    return false;
  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (auto *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      // N = N + Offset
      if (Field)
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        TotalOffs +=
            DL.getTypeAllocSize(Ty) * cast<ConstantInt>(CI)->getSExtValue();
        continue;
      }
      if (TotalOffs) {
        N = emitAdd_ri_(VT, N, NIsKill, TotalOffs);
        if (!N)
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN)
        return false;

      if (ElementSize != 1) {
        unsigned C = fastEmit_i(VT, VT, ISD::Constant, ElementSize);
        if (!C)
          return false;
        IdxN = emitMul_rr(VT, IdxN, IdxNIsKill, C, true);
        if (!IdxN)
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N)
        return false;
    }
  }
  if (TotalOffs) {
    N = emitAdd_ri_(VT, N, NIsKill, TotalOffs);
    if (!N)
      return false;
  }
  updateValueMap(I, N);
  return true;
}
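// For example, a GEP such as "getelementptr i32, i32* %p, i64 4" with only
// constant indices folds entirely into TotalOffs (16 bytes here) and is
// emitted as a single add of the accumulated offset; a variable index falls
// back to the mul + add sequence above.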
bool AArch64FastISel::selectAtomicCmpXchg(const AtomicCmpXchgInst *I) {
  assert(TM.getOptLevel() == CodeGenOpt::None &&
         "cmpxchg survived AtomicExpand at optlevel > -O0");

  auto *RetPairTy = cast<StructType>(I->getType());
  Type *RetTy = RetPairTy->getTypeAtIndex(0U);
  assert(RetPairTy->getTypeAtIndex(1U)->isIntegerTy(1) &&
         "cmpxchg has a non-i1 status result");

  MVT VT;
  if (!isTypeLegal(RetTy, VT))
    return false;

  const TargetRegisterClass *ResRC;
  unsigned Opc, CmpOpc;
  // This only supports i32/i64, because i8/i16 aren't legal, and the generic
  // extractvalue selection doesn't support that.
  if (VT == MVT::i32) {
    Opc = AArch64::CMP_SWAP_32;
    CmpOpc = AArch64::SUBSWrs;
    ResRC = &AArch64::GPR32RegClass;
  } else if (VT == MVT::i64) {
    Opc = AArch64::CMP_SWAP_64;
    CmpOpc = AArch64::SUBSXrs;
    ResRC = &AArch64::GPR64RegClass;
  } else {
    return false;
  }

  const MCInstrDesc &II = TII.get(Opc);

  const unsigned AddrReg = constrainOperandRegClass(
      II, getRegForValue(I->getPointerOperand()), II.getNumDefs());
  const unsigned DesiredReg = constrainOperandRegClass(
      II, getRegForValue(I->getCompareOperand()), II.getNumDefs() + 1);
  const unsigned NewReg = constrainOperandRegClass(
      II, getRegForValue(I->getNewValOperand()), II.getNumDefs() + 2);

  const unsigned ResultReg1 = createResultReg(ResRC);
  const unsigned ResultReg2 = createResultReg(&AArch64::GPR32RegClass);
  const unsigned ScratchReg = createResultReg(&AArch64::GPR32RegClass);

  // FIXME: MachineMemOperand doesn't support cmpxchg yet.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addDef(ResultReg1)
      .addDef(ScratchReg)
      .addUse(AddrReg)
      .addUse(DesiredReg)
      .addUse(NewReg);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
      .addDef(VT == MVT::i32 ? AArch64::WZR : AArch64::XZR)
      .addUse(ResultReg1)
      .addUse(DesiredReg)
      .addImm(0);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr))
      .addDef(ResultReg2)
      .addUse(AArch64::WZR)
      .addUse(AArch64::WZR)
      .addImm(AArch64CC::NE);

  assert((ResultReg1 + 1) == ResultReg2 && "Nonconsecutive result registers.");
  updateValueMap(I, ResultReg1, 2);
  return true;
}
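// Note: CMP_SWAP_32/CMP_SWAP_64 are pseudo instructions; they are expanded
// later by the AArch64 pseudo-expansion pass into the usual load-exclusive/
// store-exclusive retry loop, which is why no memory operand is attached
// here (see the FIXME above).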
bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Add:
  case Instruction::Sub:
    return selectAddSub(I);
  case Instruction::Mul:
    return selectMul(I);
  case Instruction::SDiv:
    return selectSDiv(I);
  case Instruction::SRem:
    if (!selectBinaryOp(I, ISD::SREM))
      return selectRem(I, ISD::SREM);
    return true;
  case Instruction::URem:
    if (!selectBinaryOp(I, ISD::UREM))
      return selectRem(I, ISD::UREM);
    return true;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return selectShift(I);
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return selectLogicalOp(I);
  case Instruction::Br:
    return selectBranch(I);
  case Instruction::IndirectBr:
    return selectIndirectBr(I);
  case Instruction::BitCast:
    if (!FastISel::selectBitCast(I))
      return selectBitCast(I);
    return true;
  case Instruction::FPToSI:
    if (!selectCast(I, ISD::FP_TO_SINT))
      return selectFPToInt(I, /*Signed=*/true);
    return true;
  case Instruction::FPToUI:
    return selectFPToInt(I, /*Signed=*/false);
  case Instruction::ZExt:
  case Instruction::SExt:
    return selectIntExt(I);
  case Instruction::Trunc:
    if (!selectCast(I, ISD::TRUNCATE))
      return selectTrunc(I);
    return true;
  case Instruction::FPExt:
    return selectFPExt(I);
  case Instruction::FPTrunc:
    return selectFPTrunc(I);
  case Instruction::SIToFP:
    if (!selectCast(I, ISD::SINT_TO_FP))
      return selectIntToFP(I, /*Signed=*/true);
    return true;
  case Instruction::UIToFP:
    return selectIntToFP(I, /*Signed=*/false);
  case Instruction::Load:
    return selectLoad(I);
  case Instruction::Store:
    return selectStore(I);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return selectCmp(I);
  case Instruction::Select:
    return selectSelect(I);
  case Instruction::Ret:
    return selectRet(I);
  case Instruction::FRem:
    return selectFRem(I);
  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);
  case Instruction::AtomicCmpXchg:
    return selectAtomicCmpXchg(cast<AtomicCmpXchgInst>(I));
  }

  // fall-back to target-independent instruction selection.
  return selectOperator(I, I->getOpcode());
}
FastISel *AArch64::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) {
  return new AArch64FastISel(FuncInfo, LibInfo);
}

} // end namespace llvm