//===- AArch64FastISel.cpp - AArch64 FastISel implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the AArch64-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// AArch64GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//
16 #include "AArch64CallingConvention.h"
17 #include "AArch64RegisterInfo.h"
18 #include "AArch64Subtarget.h"
19 #include "MCTargetDesc/AArch64AddressingModes.h"
20 #include "Utils/AArch64BaseInfo.h"
21 #include "llvm/ADT/APFloat.h"
22 #include "llvm/ADT/APInt.h"
23 #include "llvm/ADT/DenseMap.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/Analysis/BranchProbabilityInfo.h"
26 #include "llvm/CodeGen/CallingConvLower.h"
27 #include "llvm/CodeGen/FastISel.h"
28 #include "llvm/CodeGen/FunctionLoweringInfo.h"
29 #include "llvm/CodeGen/ISDOpcodes.h"
30 #include "llvm/CodeGen/MachineBasicBlock.h"
31 #include "llvm/CodeGen/MachineConstantPool.h"
32 #include "llvm/CodeGen/MachineFrameInfo.h"
33 #include "llvm/CodeGen/MachineInstr.h"
34 #include "llvm/CodeGen/MachineInstrBuilder.h"
35 #include "llvm/CodeGen/MachineMemOperand.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/RuntimeLibcalls.h"
38 #include "llvm/CodeGen/ValueTypes.h"
39 #include "llvm/IR/Argument.h"
40 #include "llvm/IR/Attributes.h"
41 #include "llvm/IR/BasicBlock.h"
42 #include "llvm/IR/CallingConv.h"
43 #include "llvm/IR/Constant.h"
44 #include "llvm/IR/Constants.h"
45 #include "llvm/IR/DataLayout.h"
46 #include "llvm/IR/DerivedTypes.h"
47 #include "llvm/IR/Function.h"
48 #include "llvm/IR/GetElementPtrTypeIterator.h"
49 #include "llvm/IR/GlobalValue.h"
50 #include "llvm/IR/InstrTypes.h"
51 #include "llvm/IR/Instruction.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/Intrinsics.h"
55 #include "llvm/IR/Operator.h"
56 #include "llvm/IR/Type.h"
57 #include "llvm/IR/User.h"
58 #include "llvm/IR/Value.h"
59 #include "llvm/MC/MCInstrDesc.h"
60 #include "llvm/MC/MCRegisterInfo.h"
61 #include "llvm/MC/MCSymbol.h"
62 #include "llvm/Support/AtomicOrdering.h"
63 #include "llvm/Support/Casting.h"
64 #include "llvm/Support/CodeGen.h"
65 #include "llvm/Support/Compiler.h"
66 #include "llvm/Support/ErrorHandling.h"
67 #include "llvm/Support/MachineValueType.h"
68 #include "llvm/Support/MathExtras.h"
using namespace llvm;

namespace {

class AArch64FastISel final : public FastISel {
  class Address {
  public:
    using BaseKind = enum {
      RegBase,
      FrameIndexBase
    };

  private:
    BaseKind Kind = RegBase;
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::InvalidShiftExtend;
    union {
      unsigned Reg;
      int FI;
    } Base;
    unsigned OffsetReg = 0;
    unsigned Shift = 0;
    int64_t Offset = 0;
    const GlobalValue *GV = nullptr;

  public:
    Address() { Base.Reg = 0; }

    void setKind(BaseKind K) { Kind = K; }
    BaseKind getKind() const { return Kind; }
    void setExtendType(AArch64_AM::ShiftExtendType E) { ExtType = E; }
    AArch64_AM::ShiftExtendType getExtendType() const { return ExtType; }
    bool isRegBase() const { return Kind == RegBase; }
    bool isFIBase() const { return Kind == FrameIndexBase; }

    void setReg(unsigned Reg) {
      assert(isRegBase() && "Invalid base register access!");
      Base.Reg = Reg;
    }

    unsigned getReg() const {
      assert(isRegBase() && "Invalid base register access!");
      return Base.Reg;
    }

    void setOffsetReg(unsigned Reg) {
      OffsetReg = Reg;
    }

    unsigned getOffsetReg() const {
      return OffsetReg;
    }

    void setFI(unsigned FI) {
      assert(isFIBase() && "Invalid base frame index access!");
      Base.FI = FI;
    }

    unsigned getFI() const {
      assert(isFIBase() && "Invalid base frame index access!");
      return Base.FI;
    }

    void setOffset(int64_t O) { Offset = O; }
    int64_t getOffset() { return Offset; }
    void setShift(unsigned S) { Shift = S; }
    unsigned getShift() { return Shift; }

    void setGlobalValue(const GlobalValue *G) { GV = G; }
    const GlobalValue *getGlobalValue() { return GV; }
  };
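  // Note: Address describes either a register base or a frame-index base plus
  // an optional offset register that may be shifted and/or sign-/zero-extended.
  // computeAddress() fills it in and simplifyAddress() rewrites it into a form
  // the AArch64 load/store addressing modes can actually encode.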
  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;
  LLVMContext *Context;

  bool fastLowerArguments() override;
  bool fastLowerCall(CallLoweringInfo &CLI) override;
  bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;

private:
  // Selection routines.
  bool selectAddSub(const Instruction *I);
  bool selectLogicalOp(const Instruction *I);
  bool selectLoad(const Instruction *I);
  bool selectStore(const Instruction *I);
  bool selectBranch(const Instruction *I);
  bool selectIndirectBr(const Instruction *I);
  bool selectCmp(const Instruction *I);
  bool selectSelect(const Instruction *I);
  bool selectFPExt(const Instruction *I);
  bool selectFPTrunc(const Instruction *I);
  bool selectFPToInt(const Instruction *I, bool Signed);
  bool selectIntToFP(const Instruction *I, bool Signed);
  bool selectRem(const Instruction *I, unsigned ISDOpcode);
  bool selectRet(const Instruction *I);
  bool selectTrunc(const Instruction *I);
  bool selectIntExt(const Instruction *I);
  bool selectMul(const Instruction *I);
  bool selectShift(const Instruction *I);
  bool selectBitCast(const Instruction *I);
  bool selectFRem(const Instruction *I);
  bool selectSDiv(const Instruction *I);
  bool selectGetElementPtr(const Instruction *I);
  bool selectAtomicCmpXchg(const AtomicCmpXchgInst *I);

  // Utility helper routines.
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed = false);
  bool isValueAvailable(const Value *V) const;
  bool computeAddress(const Value *Obj, Address &Addr, Type *Ty = nullptr);
  bool computeCallAddress(const Value *V, Address &Addr);
  bool simplifyAddress(Address &Addr, MVT VT);
  void addLoadStoreOperands(Address &Addr, const MachineInstrBuilder &MIB,
                            MachineMemOperand::Flags Flags,
                            unsigned ScaleFactor, MachineMemOperand *MMO);
  bool isMemCpySmall(uint64_t Len, unsigned Alignment);
  bool tryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                          unsigned Alignment);
  bool foldXALUIntrinsic(AArch64CC::CondCode &CC, const Instruction *I,
                         const Value *Cond);
  bool optimizeIntExtLoad(const Instruction *I, MVT RetVT, MVT SrcVT);
  bool optimizeSelect(const SelectInst *SI);
  std::pair<unsigned, bool> getRegForGEPIndex(const Value *Idx);
  // Emit helper routines.
  unsigned emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
                      const Value *RHS, bool SetFlags = false,
                      bool WantResult = true, bool IsZExt = false);
  unsigned emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
                         bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
                         bool SetFlags = false, bool WantResult = true);
  unsigned emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
                         bool LHSIsKill, uint64_t Imm, bool SetFlags = false,
                         bool WantResult = true);
  unsigned emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
                         bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
                         AArch64_AM::ShiftExtendType ShiftType,
                         uint64_t ShiftImm, bool SetFlags = false,
                         bool WantResult = true);
  unsigned emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
                         bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
                         AArch64_AM::ShiftExtendType ExtType,
                         uint64_t ShiftImm, bool SetFlags = false,
                         bool WantResult = true);

  bool emitCompareAndBranch(const BranchInst *BI);
  bool emitCmp(const Value *LHS, const Value *RHS, bool IsZExt);
  bool emitICmp(MVT RetVT, const Value *LHS, const Value *RHS, bool IsZExt);
  bool emitICmp_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
  bool emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS);
  unsigned emitLoad(MVT VT, MVT ResultVT, Address Addr, bool WantZExt = true,
                    MachineMemOperand *MMO = nullptr);
  bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
                 MachineMemOperand *MMO = nullptr);
  bool emitStoreRelease(MVT VT, unsigned SrcReg, unsigned AddrReg,
                        MachineMemOperand *MMO = nullptr);
  unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
  unsigned emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt);
  unsigned emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
                   bool SetFlags = false, bool WantResult = true,
                   bool IsZExt = false);
  unsigned emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill, int64_t Imm);
  unsigned emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
                   bool SetFlags = false, bool WantResult = true,
                   bool IsZExt = false);
  unsigned emitSubs_rr(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
                       unsigned RHSReg, bool RHSIsKill, bool WantResult = true);
  unsigned emitSubs_rs(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
                       unsigned RHSReg, bool RHSIsKill,
                       AArch64_AM::ShiftExtendType ShiftType, uint64_t ShiftImm,
                       bool WantResult = true);
  unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
                         const Value *RHS);
  unsigned emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
                            bool LHSIsKill, uint64_t Imm);
  unsigned emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
                            bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
                            uint64_t ShiftImm);
  unsigned emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
  unsigned emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                      unsigned Op1, bool Op1IsKill);
  unsigned emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                        unsigned Op1, bool Op1IsKill);
  unsigned emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                        unsigned Op1, bool Op1IsKill);
  unsigned emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
                      unsigned Op1Reg, bool Op1IsKill);
  unsigned emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
                      uint64_t Imm, bool IsZExt = true);
  unsigned emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
                      unsigned Op1Reg, bool Op1IsKill);
  unsigned emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
                      uint64_t Imm, bool IsZExt = true);
  unsigned emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
                      unsigned Op1Reg, bool Op1IsKill);
  unsigned emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
                      uint64_t Imm, bool IsZExt = false);

  unsigned materializeInt(const ConstantInt *CI, MVT VT);
  unsigned materializeFP(const ConstantFP *CFP, MVT VT);
  unsigned materializeGV(const GlobalValue *GV);
  // Call handling routines.
private:
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
  bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
                       unsigned &NumBytes);
  bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);

public:
  // Backend specific FastISel code.
  unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
  unsigned fastMaterializeConstant(const Constant *C) override;
  unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;

  explicit AArch64FastISel(FunctionLoweringInfo &FuncInfo,
                           const TargetLibraryInfo *LibInfo)
      : FastISel(FuncInfo, LibInfo, /*SkipTargetIndependentISel=*/true) {
    Subtarget =
        &static_cast<const AArch64Subtarget &>(FuncInfo.MF->getSubtarget());
    Context = &FuncInfo.Fn->getContext();
  }

  bool fastSelectInstruction(const Instruction *I) override;

#include "AArch64GenFastISel.inc"
};

} // end anonymous namespace
/// Check if the sign-/zero-extend will be a noop.
static bool isIntExtFree(const Instruction *I) {
  assert((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
         "Unexpected integer extend instruction.");
  assert(!I->getType()->isVectorTy() && I->getType()->isIntegerTy() &&
         "Unexpected value type.");
  bool IsZExt = isa<ZExtInst>(I);

  if (const auto *LI = dyn_cast<LoadInst>(I->getOperand(0)))
    if (LI->hasOneUse())
      return true;

  if (const auto *Arg = dyn_cast<Argument>(I->getOperand(0)))
    if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr()))
      return true;

  return false;
}
/// Determine the implicit scale factor that is applied by a memory
/// operation for a given value type.
static unsigned getImplicitScaleFactor(MVT VT) {
  switch (VT.SimpleTy) {
  default:
    return 0;    // invalid
  case MVT::i1:  // fall-through
  case MVT::i8:
    return 1;
  case MVT::i16:
    return 2;
  case MVT::i32: // fall-through
  case MVT::f32:
    return 4;
  case MVT::i64: // fall-through
  case MVT::f64:
    return 8;
  }
}
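// For example, an i32 load/store implicitly scales its unsigned immediate
// offset by 4 ("ldr w0, [x1, #8]" encodes an offset field of 2), so
// getImplicitScaleFactor(MVT::i32) returns 4.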
CCAssignFn *AArch64FastISel::CCAssignFnForCall(CallingConv::ID CC) const {
  if (CC == CallingConv::WebKit_JS)
    return CC_AArch64_WebKit_JS;
  if (CC == CallingConv::GHC)
    return CC_AArch64_GHC;
  return Subtarget->isTargetDarwin() ? CC_AArch64_DarwinPCS : CC_AArch64_AAPCS;
}
unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i64 &&
         "Alloca should always return a pointer.");

  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI))
    return 0;

  DenseMap<const AllocaInst *, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
            ResultReg)
        .addFrameIndex(SI->second)
        .addImm(0)
        .addImm(0);
    return ResultReg;
  }

  return 0;
}
unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) {
  if (VT > MVT::i64)
    return 0;

  if (!CI->isZero())
    return fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());

  // Create a copy from the zero register to materialize a "0" value.
  const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass
                                                   : &AArch64::GPR32RegClass;
  unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
  unsigned ResultReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(ZeroReg, getKillRegState(true));
  return ResultReg;
}
unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
  // Positive zero (+0.0) has to be materialized with a fmov from the zero
  // register, because the immediate version of fmov cannot encode zero.
  if (CFP->isNullValue())
    return fastMaterializeFloatZero(CFP);

  if (VT != MVT::f32 && VT != MVT::f64)
    return 0;

  const APFloat Val = CFP->getValueAPF();
  bool Is64Bit = (VT == MVT::f64);
  // This checks to see if we can use FMOV instructions to materialize
  // a constant, otherwise we have to materialize via the constant pool.
  int Imm =
      Is64Bit ? AArch64_AM::getFP64Imm(Val) : AArch64_AM::getFP32Imm(Val);
  if (Imm != -1) {
    unsigned Opc = Is64Bit ? AArch64::FMOVDi : AArch64::FMOVSi;
    return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
  }

  // For the MachO large code model materialize the FP constant in code.
  if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
    unsigned Opc1 = Is64Bit ? AArch64::MOVi64imm : AArch64::MOVi32imm;
    const TargetRegisterClass *RC = Is64Bit ?
        &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

    unsigned TmpReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc1), TmpReg)
        .addImm(CFP->getValueAPF().bitcastToAPInt().getZExtValue());

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(TmpReg, getKillRegState(true));

    return ResultReg;
  }

  // Materialize via constant pool. MachineConstantPool wants an explicit
  // alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0)
    Align = DL.getTypeAllocSize(CFP->getType());

  unsigned CPI = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
          ADRPReg).addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGE);

  unsigned Opc = Is64Bit ? AArch64::LDRDui : AArch64::LDRSui;
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(ADRPReg)
      .addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  return ResultReg;
}
unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
  // We can't handle thread-local variables quickly yet.
  if (GV->isThreadLocal())
    return 0;

  // MachO still uses GOT for large code-model accesses, but ELF requires
  // movz/movk sequences, which FastISel doesn't handle yet.
  if (!Subtarget->useSmallAddressing() && !Subtarget->isTargetMachO())
    return 0;

  unsigned OpFlags = Subtarget->ClassifyGlobalReference(GV, TM);

  EVT DestEVT = TLI.getValueType(DL, GV->getType(), true);
  if (!DestEVT.isSimple())
    return 0;

  unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
  unsigned ResultReg;

  if (OpFlags & AArch64II::MO_GOT) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
            ADRPReg)
        .addGlobalAddress(GV, 0, AArch64II::MO_PAGE | OpFlags);

    unsigned LdrOpc;
    if (Subtarget->isTargetILP32()) {
      ResultReg = createResultReg(&AArch64::GPR32RegClass);
      LdrOpc = AArch64::LDRWui;
    } else {
      ResultReg = createResultReg(&AArch64::GPR64RegClass);
      LdrOpc = AArch64::LDRXui;
    }
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(LdrOpc),
            ResultReg)
        .addReg(ADRPReg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
                          AArch64II::MO_NC | OpFlags);
    if (!Subtarget->isTargetILP32())
      return ResultReg;

    // LDRWui produces a 32-bit register, but pointers in-register are 64-bits
    // so we must extend the result on ILP32.
    unsigned Result64 = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::SUBREG_TO_REG))
        .addDef(Result64)
        .addImm(0)
        .addReg(ResultReg, RegState::Kill)
        .addImm(AArch64::sub_32);
    return Result64;
  } else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
            ADRPReg)
        .addGlobalAddress(GV, 0, AArch64II::MO_PAGE | OpFlags);

    ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
            ResultReg)
        .addReg(ADRPReg)
        .addGlobalAddress(GV, 0,
                          AArch64II::MO_PAGEOFF | AArch64II::MO_NC | OpFlags)
        .addImm(0);
  }
  return ResultReg;
}
unsigned AArch64FastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple())
    return 0;
  MVT VT = CEVT.getSimpleVT();
  // arm64_32 has 32-bit pointers held in 64-bit registers. Because of that,
  // 'null' pointers need to have a somewhat special treatment.
  if (const auto *CPN = dyn_cast<ConstantPointerNull>(C)) {
    assert(CPN->getType()->getPointerAddressSpace() == 0 &&
           "Unexpected address space");
    assert(VT == MVT::i64 && "Expected 64-bit pointers");
    return materializeInt(ConstantInt::get(Type::getInt64Ty(*Context), 0), VT);
  }

  if (const auto *CI = dyn_cast<ConstantInt>(C))
    return materializeInt(CI, VT);
  else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return materializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return materializeGV(GV);

  return 0;
}
unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP *CFP) {
  assert(CFP->isNullValue() &&
         "Floating-point constant is not a positive zero.");
  MVT VT;
  if (!isTypeLegal(CFP->getType(), VT))
    return 0;

  if (VT != MVT::f32 && VT != MVT::f64)
    return 0;

  bool Is64Bit = (VT == MVT::f64);
  unsigned ZReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
  unsigned Opc = Is64Bit ? AArch64::FMOVXDr : AArch64::FMOVWSr;
  return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
}
/// Check if the multiply is by a power-of-2 constant.
static bool isMulPowOf2(const Value *I) {
  if (const auto *MI = dyn_cast<MulOperator>(I)) {
    if (const auto *C = dyn_cast<ConstantInt>(MI->getOperand(0)))
      if (C->getValue().isPowerOf2())
        return true;
    if (const auto *C = dyn_cast<ConstantInt>(MI->getOperand(1)))
      if (C->getValue().isPowerOf2())
        return true;
  }
  return false;
}
576 // Computes the address to get to an object.
577 bool AArch64FastISel::computeAddress(const Value
*Obj
, Address
&Addr
, Type
*Ty
)
579 const User
*U
= nullptr;
580 unsigned Opcode
= Instruction::UserOp1
;
581 if (const Instruction
*I
= dyn_cast
<Instruction
>(Obj
)) {
582 // Don't walk into other basic blocks unless the object is an alloca from
583 // another block, otherwise it may not have a virtual register assigned.
584 if (FuncInfo
.StaticAllocaMap
.count(static_cast<const AllocaInst
*>(Obj
)) ||
585 FuncInfo
.MBBMap
[I
->getParent()] == FuncInfo
.MBB
) {
586 Opcode
= I
->getOpcode();
589 } else if (const ConstantExpr
*C
= dyn_cast
<ConstantExpr
>(Obj
)) {
590 Opcode
= C
->getOpcode();
594 if (auto *Ty
= dyn_cast
<PointerType
>(Obj
->getType()))
595 if (Ty
->getAddressSpace() > 255)
596 // Fast instruction selection doesn't support the special
603 case Instruction::BitCast
:
604 // Look through bitcasts.
605 return computeAddress(U
->getOperand(0), Addr
, Ty
);
607 case Instruction::IntToPtr
:
608 // Look past no-op inttoptrs.
609 if (TLI
.getValueType(DL
, U
->getOperand(0)->getType()) ==
610 TLI
.getPointerTy(DL
))
611 return computeAddress(U
->getOperand(0), Addr
, Ty
);
614 case Instruction::PtrToInt
:
615 // Look past no-op ptrtoints.
616 if (TLI
.getValueType(DL
, U
->getType()) == TLI
.getPointerTy(DL
))
617 return computeAddress(U
->getOperand(0), Addr
, Ty
);
620 case Instruction::GetElementPtr
: {
621 Address SavedAddr
= Addr
;
622 uint64_t TmpOffset
= Addr
.getOffset();
624 // Iterate through the GEP folding the constants into offsets where
626 for (gep_type_iterator GTI
= gep_type_begin(U
), E
= gep_type_end(U
);
628 const Value
*Op
= GTI
.getOperand();
629 if (StructType
*STy
= GTI
.getStructTypeOrNull()) {
630 const StructLayout
*SL
= DL
.getStructLayout(STy
);
631 unsigned Idx
= cast
<ConstantInt
>(Op
)->getZExtValue();
632 TmpOffset
+= SL
->getElementOffset(Idx
);
634 uint64_t S
= DL
.getTypeAllocSize(GTI
.getIndexedType());
636 if (const ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op
)) {
637 // Constant-offset addressing.
638 TmpOffset
+= CI
->getSExtValue() * S
;
641 if (canFoldAddIntoGEP(U
, Op
)) {
642 // A compatible add with a constant operand. Fold the constant.
644 cast
<ConstantInt
>(cast
<AddOperator
>(Op
)->getOperand(1));
645 TmpOffset
+= CI
->getSExtValue() * S
;
646 // Iterate on the other operand.
647 Op
= cast
<AddOperator
>(Op
)->getOperand(0);
651 goto unsupported_gep
;
656 // Try to grab the base operand now.
657 Addr
.setOffset(TmpOffset
);
658 if (computeAddress(U
->getOperand(0), Addr
, Ty
))
661 // We failed, restore everything and try the other options.
667 case Instruction::Alloca
: {
668 const AllocaInst
*AI
= cast
<AllocaInst
>(Obj
);
669 DenseMap
<const AllocaInst
*, int>::iterator SI
=
670 FuncInfo
.StaticAllocaMap
.find(AI
);
671 if (SI
!= FuncInfo
.StaticAllocaMap
.end()) {
672 Addr
.setKind(Address::FrameIndexBase
);
673 Addr
.setFI(SI
->second
);
678 case Instruction::Add
: {
679 // Adds of constants are common and easy enough.
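    // (A constant RHS is folded into the address' immediate offset below; a
    // constant LHS is first swapped over to the RHS.)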
680 const Value
*LHS
= U
->getOperand(0);
681 const Value
*RHS
= U
->getOperand(1);
683 if (isa
<ConstantInt
>(LHS
))
686 if (const ConstantInt
*CI
= dyn_cast
<ConstantInt
>(RHS
)) {
687 Addr
.setOffset(Addr
.getOffset() + CI
->getSExtValue());
688 return computeAddress(LHS
, Addr
, Ty
);
691 Address Backup
= Addr
;
692 if (computeAddress(LHS
, Addr
, Ty
) && computeAddress(RHS
, Addr
, Ty
))
698 case Instruction::Sub
: {
699 // Subs of constants are common and easy enough.
700 const Value
*LHS
= U
->getOperand(0);
701 const Value
*RHS
= U
->getOperand(1);
703 if (const ConstantInt
*CI
= dyn_cast
<ConstantInt
>(RHS
)) {
704 Addr
.setOffset(Addr
.getOffset() - CI
->getSExtValue());
705 return computeAddress(LHS
, Addr
, Ty
);
709 case Instruction::Shl
: {
710 if (Addr
.getOffsetReg())
713 const auto *CI
= dyn_cast
<ConstantInt
>(U
->getOperand(1));
717 unsigned Val
= CI
->getZExtValue();
718 if (Val
< 1 || Val
> 3)
721 uint64_t NumBytes
= 0;
722 if (Ty
&& Ty
->isSized()) {
723 uint64_t NumBits
= DL
.getTypeSizeInBits(Ty
);
724 NumBytes
= NumBits
/ 8;
725 if (!isPowerOf2_64(NumBits
))
729 if (NumBytes
!= (1ULL << Val
))
733 Addr
.setExtendType(AArch64_AM::LSL
);
735 const Value
*Src
= U
->getOperand(0);
736 if (const auto *I
= dyn_cast
<Instruction
>(Src
)) {
737 if (FuncInfo
.MBBMap
[I
->getParent()] == FuncInfo
.MBB
) {
738 // Fold the zext or sext when it won't become a noop.
739 if (const auto *ZE
= dyn_cast
<ZExtInst
>(I
)) {
740 if (!isIntExtFree(ZE
) &&
741 ZE
->getOperand(0)->getType()->isIntegerTy(32)) {
742 Addr
.setExtendType(AArch64_AM::UXTW
);
743 Src
= ZE
->getOperand(0);
745 } else if (const auto *SE
= dyn_cast
<SExtInst
>(I
)) {
746 if (!isIntExtFree(SE
) &&
747 SE
->getOperand(0)->getType()->isIntegerTy(32)) {
748 Addr
.setExtendType(AArch64_AM::SXTW
);
749 Src
= SE
->getOperand(0);
755 if (const auto *AI
= dyn_cast
<BinaryOperator
>(Src
))
756 if (AI
->getOpcode() == Instruction::And
) {
757 const Value
*LHS
= AI
->getOperand(0);
758 const Value
*RHS
= AI
->getOperand(1);
760 if (const auto *C
= dyn_cast
<ConstantInt
>(LHS
))
761 if (C
->getValue() == 0xffffffff)
764 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
765 if (C
->getValue() == 0xffffffff) {
766 Addr
.setExtendType(AArch64_AM::UXTW
);
767 unsigned Reg
= getRegForValue(LHS
);
770 bool RegIsKill
= hasTrivialKill(LHS
);
771 Reg
= fastEmitInst_extractsubreg(MVT::i32
, Reg
, RegIsKill
,
773 Addr
.setOffsetReg(Reg
);
778 unsigned Reg
= getRegForValue(Src
);
781 Addr
.setOffsetReg(Reg
);
784 case Instruction::Mul
: {
785 if (Addr
.getOffsetReg())
791 const Value
*LHS
= U
->getOperand(0);
792 const Value
*RHS
= U
->getOperand(1);
794 // Canonicalize power-of-2 value to the RHS.
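    // (The non-constant operand becomes the offset register and the power of
    // two becomes the LSL amount folded into the addressing mode.)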
795 if (const auto *C
= dyn_cast
<ConstantInt
>(LHS
))
796 if (C
->getValue().isPowerOf2())
    assert(isa<ConstantInt>(RHS) && "Expected a ConstantInt.");
800 const auto *C
= cast
<ConstantInt
>(RHS
);
801 unsigned Val
= C
->getValue().logBase2();
802 if (Val
< 1 || Val
> 3)
805 uint64_t NumBytes
= 0;
806 if (Ty
&& Ty
->isSized()) {
807 uint64_t NumBits
= DL
.getTypeSizeInBits(Ty
);
808 NumBytes
= NumBits
/ 8;
809 if (!isPowerOf2_64(NumBits
))
813 if (NumBytes
!= (1ULL << Val
))
817 Addr
.setExtendType(AArch64_AM::LSL
);
819 const Value
*Src
= LHS
;
820 if (const auto *I
= dyn_cast
<Instruction
>(Src
)) {
821 if (FuncInfo
.MBBMap
[I
->getParent()] == FuncInfo
.MBB
) {
822 // Fold the zext or sext when it won't become a noop.
823 if (const auto *ZE
= dyn_cast
<ZExtInst
>(I
)) {
824 if (!isIntExtFree(ZE
) &&
825 ZE
->getOperand(0)->getType()->isIntegerTy(32)) {
826 Addr
.setExtendType(AArch64_AM::UXTW
);
827 Src
= ZE
->getOperand(0);
829 } else if (const auto *SE
= dyn_cast
<SExtInst
>(I
)) {
830 if (!isIntExtFree(SE
) &&
831 SE
->getOperand(0)->getType()->isIntegerTy(32)) {
832 Addr
.setExtendType(AArch64_AM::SXTW
);
833 Src
= SE
->getOperand(0);
839 unsigned Reg
= getRegForValue(Src
);
842 Addr
.setOffsetReg(Reg
);
845 case Instruction::And
: {
846 if (Addr
.getOffsetReg())
849 if (!Ty
|| DL
.getTypeSizeInBits(Ty
) != 8)
852 const Value
*LHS
= U
->getOperand(0);
853 const Value
*RHS
= U
->getOperand(1);
855 if (const auto *C
= dyn_cast
<ConstantInt
>(LHS
))
856 if (C
->getValue() == 0xffffffff)
859 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
860 if (C
->getValue() == 0xffffffff) {
862 Addr
.setExtendType(AArch64_AM::LSL
);
863 Addr
.setExtendType(AArch64_AM::UXTW
);
865 unsigned Reg
= getRegForValue(LHS
);
868 bool RegIsKill
= hasTrivialKill(LHS
);
869 Reg
= fastEmitInst_extractsubreg(MVT::i32
, Reg
, RegIsKill
,
871 Addr
.setOffsetReg(Reg
);
876 case Instruction::SExt
:
877 case Instruction::ZExt
: {
878 if (!Addr
.getReg() || Addr
.getOffsetReg())
881 const Value
*Src
= nullptr;
882 // Fold the zext or sext when it won't become a noop.
883 if (const auto *ZE
= dyn_cast
<ZExtInst
>(U
)) {
884 if (!isIntExtFree(ZE
) && ZE
->getOperand(0)->getType()->isIntegerTy(32)) {
885 Addr
.setExtendType(AArch64_AM::UXTW
);
886 Src
= ZE
->getOperand(0);
888 } else if (const auto *SE
= dyn_cast
<SExtInst
>(U
)) {
889 if (!isIntExtFree(SE
) && SE
->getOperand(0)->getType()->isIntegerTy(32)) {
890 Addr
.setExtendType(AArch64_AM::SXTW
);
891 Src
= SE
->getOperand(0);
899 unsigned Reg
= getRegForValue(Src
);
902 Addr
.setOffsetReg(Reg
);
907 if (Addr
.isRegBase() && !Addr
.getReg()) {
908 unsigned Reg
= getRegForValue(Obj
);
915 if (!Addr
.getOffsetReg()) {
916 unsigned Reg
= getRegForValue(Obj
);
919 Addr
.setOffsetReg(Reg
);
926 bool AArch64FastISel::computeCallAddress(const Value
*V
, Address
&Addr
) {
927 const User
*U
= nullptr;
928 unsigned Opcode
= Instruction::UserOp1
;
931 if (const auto *I
= dyn_cast
<Instruction
>(V
)) {
932 Opcode
= I
->getOpcode();
934 InMBB
= I
->getParent() == FuncInfo
.MBB
->getBasicBlock();
935 } else if (const auto *C
= dyn_cast
<ConstantExpr
>(V
)) {
936 Opcode
= C
->getOpcode();
942 case Instruction::BitCast
:
943 // Look past bitcasts if its operand is in the same BB.
945 return computeCallAddress(U
->getOperand(0), Addr
);
947 case Instruction::IntToPtr
:
948 // Look past no-op inttoptrs if its operand is in the same BB.
950 TLI
.getValueType(DL
, U
->getOperand(0)->getType()) ==
951 TLI
.getPointerTy(DL
))
952 return computeCallAddress(U
->getOperand(0), Addr
);
954 case Instruction::PtrToInt
:
955 // Look past no-op ptrtoints if its operand is in the same BB.
956 if (InMBB
&& TLI
.getValueType(DL
, U
->getType()) == TLI
.getPointerTy(DL
))
957 return computeCallAddress(U
->getOperand(0), Addr
);
961 if (const GlobalValue
*GV
= dyn_cast
<GlobalValue
>(V
)) {
962 Addr
.setGlobalValue(GV
);
966 // If all else fails, try to materialize the value in a register.
967 if (!Addr
.getGlobalValue()) {
968 Addr
.setReg(getRegForValue(V
));
969 return Addr
.getReg() != 0;
975 bool AArch64FastISel::isTypeLegal(Type
*Ty
, MVT
&VT
) {
976 EVT evt
= TLI
.getValueType(DL
, Ty
, true);
978 if (Subtarget
->isTargetILP32() && Ty
->isPointerTy())
981 // Only handle simple types.
982 if (evt
== MVT::Other
|| !evt
.isSimple())
984 VT
= evt
.getSimpleVT();
986 // This is a legal type, but it's not something we handle in fast-isel.
990 // Handle all other legal types, i.e. a register that will directly hold this
992 return TLI
.isTypeLegal(VT
);
995 /// Determine if the value type is supported by FastISel.
997 /// FastISel for AArch64 can handle more value types than are legal. This adds
/// simple value types such as i1, i8, and i16.
999 bool AArch64FastISel::isTypeSupported(Type
*Ty
, MVT
&VT
, bool IsVectorAllowed
) {
1000 if (Ty
->isVectorTy() && !IsVectorAllowed
)
1003 if (isTypeLegal(Ty
, VT
))
  // If this is a type that can be sign- or zero-extended to a basic operation,
  // go ahead and accept it now.
1008 if (VT
== MVT::i1
|| VT
== MVT::i8
|| VT
== MVT::i16
)
1014 bool AArch64FastISel::isValueAvailable(const Value
*V
) const {
1015 if (!isa
<Instruction
>(V
))
1018 const auto *I
= cast
<Instruction
>(V
);
1019 return FuncInfo
.MBBMap
[I
->getParent()] == FuncInfo
.MBB
;
1022 bool AArch64FastISel::simplifyAddress(Address
&Addr
, MVT VT
) {
1023 if (Subtarget
->isTargetILP32())
1026 unsigned ScaleFactor
= getImplicitScaleFactor(VT
);
1030 bool ImmediateOffsetNeedsLowering
= false;
1031 bool RegisterOffsetNeedsLowering
= false;
1032 int64_t Offset
= Addr
.getOffset();
1033 if (((Offset
< 0) || (Offset
& (ScaleFactor
- 1))) && !isInt
<9>(Offset
))
1034 ImmediateOffsetNeedsLowering
= true;
1035 else if (Offset
> 0 && !(Offset
& (ScaleFactor
- 1)) &&
1036 !isUInt
<12>(Offset
/ ScaleFactor
))
1037 ImmediateOffsetNeedsLowering
= true;
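  // (Scaled LDR/STR forms take a 12-bit unsigned immediate, while the unscaled
  // LDUR/STUR forms take a 9-bit signed immediate; offsets that fit neither
  // encoding are lowered into a separate add below.)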
1039 // Cannot encode an offset register and an immediate offset in the same
1040 // instruction. Fold the immediate offset into the load/store instruction and
1041 // emit an additional add to take care of the offset register.
1042 if (!ImmediateOffsetNeedsLowering
&& Addr
.getOffset() && Addr
.getOffsetReg())
1043 RegisterOffsetNeedsLowering
= true;
1045 // Cannot encode zero register as base.
1046 if (Addr
.isRegBase() && Addr
.getOffsetReg() && !Addr
.getReg())
1047 RegisterOffsetNeedsLowering
= true;
1049 // If this is a stack pointer and the offset needs to be simplified then put
1050 // the alloca address into a register, set the base type back to register and
1051 // continue. This should almost never happen.
1052 if ((ImmediateOffsetNeedsLowering
|| Addr
.getOffsetReg()) && Addr
.isFIBase())
1054 unsigned ResultReg
= createResultReg(&AArch64::GPR64spRegClass
);
1055 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(AArch64::ADDXri
),
1057 .addFrameIndex(Addr
.getFI())
1060 Addr
.setKind(Address::RegBase
);
1061 Addr
.setReg(ResultReg
);
1064 if (RegisterOffsetNeedsLowering
) {
1065 unsigned ResultReg
= 0;
1066 if (Addr
.getReg()) {
1067 if (Addr
.getExtendType() == AArch64_AM::SXTW
||
1068 Addr
.getExtendType() == AArch64_AM::UXTW
)
1069 ResultReg
= emitAddSub_rx(/*UseAdd=*/true, MVT::i64
, Addr
.getReg(),
1070 /*TODO:IsKill=*/false, Addr
.getOffsetReg(),
1071 /*TODO:IsKill=*/false, Addr
.getExtendType(),
1074 ResultReg
= emitAddSub_rs(/*UseAdd=*/true, MVT::i64
, Addr
.getReg(),
1075 /*TODO:IsKill=*/false, Addr
.getOffsetReg(),
1076 /*TODO:IsKill=*/false, AArch64_AM::LSL
,
1079 if (Addr
.getExtendType() == AArch64_AM::UXTW
)
1080 ResultReg
= emitLSL_ri(MVT::i64
, MVT::i32
, Addr
.getOffsetReg(),
1081 /*Op0IsKill=*/false, Addr
.getShift(),
1083 else if (Addr
.getExtendType() == AArch64_AM::SXTW
)
1084 ResultReg
= emitLSL_ri(MVT::i64
, MVT::i32
, Addr
.getOffsetReg(),
1085 /*Op0IsKill=*/false, Addr
.getShift(),
1088 ResultReg
= emitLSL_ri(MVT::i64
, MVT::i64
, Addr
.getOffsetReg(),
1089 /*Op0IsKill=*/false, Addr
.getShift());
1094 Addr
.setReg(ResultReg
);
1095 Addr
.setOffsetReg(0);
1097 Addr
.setExtendType(AArch64_AM::InvalidShiftExtend
);
1100 // Since the offset is too large for the load/store instruction get the
1101 // reg+offset into a register.
1102 if (ImmediateOffsetNeedsLowering
) {
1105 // Try to fold the immediate into the add instruction.
1106 ResultReg
= emitAdd_ri_(MVT::i64
, Addr
.getReg(), /*IsKill=*/false, Offset
);
1108 ResultReg
= fastEmit_i(MVT::i64
, MVT::i64
, ISD::Constant
, Offset
);
1112 Addr
.setReg(ResultReg
);
1118 void AArch64FastISel::addLoadStoreOperands(Address
&Addr
,
1119 const MachineInstrBuilder
&MIB
,
1120 MachineMemOperand::Flags Flags
,
1121 unsigned ScaleFactor
,
1122 MachineMemOperand
*MMO
) {
1123 int64_t Offset
= Addr
.getOffset() / ScaleFactor
;
1124 // Frame base works a bit differently. Handle it separately.
1125 if (Addr
.isFIBase()) {
1126 int FI
= Addr
.getFI();
1127 // FIXME: We shouldn't be using getObjectSize/getObjectAlignment. The size
1128 // and alignment should be based on the VT.
1129 MMO
= FuncInfo
.MF
->getMachineMemOperand(
1130 MachinePointerInfo::getFixedStack(*FuncInfo
.MF
, FI
, Offset
), Flags
,
1131 MFI
.getObjectSize(FI
), MFI
.getObjectAlignment(FI
));
1132 // Now add the rest of the operands.
1133 MIB
.addFrameIndex(FI
).addImm(Offset
);
1135 assert(Addr
.isRegBase() && "Unexpected address kind.");
1136 const MCInstrDesc
&II
= MIB
->getDesc();
1137 unsigned Idx
= (Flags
& MachineMemOperand::MOStore
) ? 1 : 0;
1139 constrainOperandRegClass(II
, Addr
.getReg(), II
.getNumDefs()+Idx
));
1141 constrainOperandRegClass(II
, Addr
.getOffsetReg(), II
.getNumDefs()+Idx
+1));
1142 if (Addr
.getOffsetReg()) {
1143 assert(Addr
.getOffset() == 0 && "Unexpected offset");
1144 bool IsSigned
= Addr
.getExtendType() == AArch64_AM::SXTW
||
1145 Addr
.getExtendType() == AArch64_AM::SXTX
;
1146 MIB
.addReg(Addr
.getReg());
1147 MIB
.addReg(Addr
.getOffsetReg());
1148 MIB
.addImm(IsSigned
);
1149 MIB
.addImm(Addr
.getShift() != 0);
1151 MIB
.addReg(Addr
.getReg()).addImm(Offset
);
1155 MIB
.addMemOperand(MMO
);
1158 unsigned AArch64FastISel::emitAddSub(bool UseAdd
, MVT RetVT
, const Value
*LHS
,
1159 const Value
*RHS
, bool SetFlags
,
1160 bool WantResult
, bool IsZExt
) {
1161 AArch64_AM::ShiftExtendType ExtendType
= AArch64_AM::InvalidShiftExtend
;
1162 bool NeedExtend
= false;
1163 switch (RetVT
.SimpleTy
) {
1171 ExtendType
= IsZExt
? AArch64_AM::UXTB
: AArch64_AM::SXTB
;
1175 ExtendType
= IsZExt
? AArch64_AM::UXTH
: AArch64_AM::SXTH
;
1177 case MVT::i32
: // fall-through
1182 RetVT
.SimpleTy
= std::max(RetVT
.SimpleTy
, MVT::i32
);
1184 // Canonicalize immediates to the RHS first.
1185 if (UseAdd
&& isa
<Constant
>(LHS
) && !isa
<Constant
>(RHS
))
1186 std::swap(LHS
, RHS
);
1188 // Canonicalize mul by power of 2 to the RHS.
1189 if (UseAdd
&& LHS
->hasOneUse() && isValueAvailable(LHS
))
1190 if (isMulPowOf2(LHS
))
1191 std::swap(LHS
, RHS
);
1193 // Canonicalize shift immediate to the RHS.
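  // (Keeping shifts and power-of-two multiplies on the RHS lets them be folded
  // into the shifted-register forms of ADD/SUB further down.)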
1194 if (UseAdd
&& LHS
->hasOneUse() && isValueAvailable(LHS
))
1195 if (const auto *SI
= dyn_cast
<BinaryOperator
>(LHS
))
1196 if (isa
<ConstantInt
>(SI
->getOperand(1)))
1197 if (SI
->getOpcode() == Instruction::Shl
||
1198 SI
->getOpcode() == Instruction::LShr
||
1199 SI
->getOpcode() == Instruction::AShr
)
1200 std::swap(LHS
, RHS
);
1202 unsigned LHSReg
= getRegForValue(LHS
);
1205 bool LHSIsKill
= hasTrivialKill(LHS
);
1208 LHSReg
= emitIntExt(SrcVT
, LHSReg
, RetVT
, IsZExt
);
1210 unsigned ResultReg
= 0;
1211 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
)) {
1212 uint64_t Imm
= IsZExt
? C
->getZExtValue() : C
->getSExtValue();
1213 if (C
->isNegative())
1214 ResultReg
= emitAddSub_ri(!UseAdd
, RetVT
, LHSReg
, LHSIsKill
, -Imm
,
1215 SetFlags
, WantResult
);
1217 ResultReg
= emitAddSub_ri(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, Imm
, SetFlags
,
1219 } else if (const auto *C
= dyn_cast
<Constant
>(RHS
))
1220 if (C
->isNullValue())
1221 ResultReg
= emitAddSub_ri(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, 0, SetFlags
,
1227 // Only extend the RHS within the instruction if there is a valid extend type.
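  // (The extended-register ADD/SUB form applies the UXTB/UXTH/SXTB/SXTH extend
  // to the RHS for free, so no separate extend instruction is needed.)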
1228 if (ExtendType
!= AArch64_AM::InvalidShiftExtend
&& RHS
->hasOneUse() &&
1229 isValueAvailable(RHS
)) {
1230 if (const auto *SI
= dyn_cast
<BinaryOperator
>(RHS
))
1231 if (const auto *C
= dyn_cast
<ConstantInt
>(SI
->getOperand(1)))
1232 if ((SI
->getOpcode() == Instruction::Shl
) && (C
->getZExtValue() < 4)) {
1233 unsigned RHSReg
= getRegForValue(SI
->getOperand(0));
1236 bool RHSIsKill
= hasTrivialKill(SI
->getOperand(0));
1237 return emitAddSub_rx(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1238 RHSIsKill
, ExtendType
, C
->getZExtValue(),
1239 SetFlags
, WantResult
);
1241 unsigned RHSReg
= getRegForValue(RHS
);
1244 bool RHSIsKill
= hasTrivialKill(RHS
);
1245 return emitAddSub_rx(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
, RHSIsKill
,
1246 ExtendType
, 0, SetFlags
, WantResult
);
1249 // Check if the mul can be folded into the instruction.
1250 if (RHS
->hasOneUse() && isValueAvailable(RHS
)) {
1251 if (isMulPowOf2(RHS
)) {
1252 const Value
*MulLHS
= cast
<MulOperator
>(RHS
)->getOperand(0);
1253 const Value
*MulRHS
= cast
<MulOperator
>(RHS
)->getOperand(1);
1255 if (const auto *C
= dyn_cast
<ConstantInt
>(MulLHS
))
1256 if (C
->getValue().isPowerOf2())
1257 std::swap(MulLHS
, MulRHS
);
1259 assert(isa
<ConstantInt
>(MulRHS
) && "Expected a ConstantInt.");
1260 uint64_t ShiftVal
= cast
<ConstantInt
>(MulRHS
)->getValue().logBase2();
1261 unsigned RHSReg
= getRegForValue(MulLHS
);
1264 bool RHSIsKill
= hasTrivialKill(MulLHS
);
1265 ResultReg
= emitAddSub_rs(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1266 RHSIsKill
, AArch64_AM::LSL
, ShiftVal
, SetFlags
,
1273 // Check if the shift can be folded into the instruction.
1274 if (RHS
->hasOneUse() && isValueAvailable(RHS
)) {
1275 if (const auto *SI
= dyn_cast
<BinaryOperator
>(RHS
)) {
1276 if (const auto *C
= dyn_cast
<ConstantInt
>(SI
->getOperand(1))) {
1277 AArch64_AM::ShiftExtendType ShiftType
= AArch64_AM::InvalidShiftExtend
;
1278 switch (SI
->getOpcode()) {
1280 case Instruction::Shl
: ShiftType
= AArch64_AM::LSL
; break;
1281 case Instruction::LShr
: ShiftType
= AArch64_AM::LSR
; break;
1282 case Instruction::AShr
: ShiftType
= AArch64_AM::ASR
; break;
1284 uint64_t ShiftVal
= C
->getZExtValue();
1285 if (ShiftType
!= AArch64_AM::InvalidShiftExtend
) {
1286 unsigned RHSReg
= getRegForValue(SI
->getOperand(0));
1289 bool RHSIsKill
= hasTrivialKill(SI
->getOperand(0));
1290 ResultReg
= emitAddSub_rs(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1291 RHSIsKill
, ShiftType
, ShiftVal
, SetFlags
,
1300 unsigned RHSReg
= getRegForValue(RHS
);
1303 bool RHSIsKill
= hasTrivialKill(RHS
);
1306 RHSReg
= emitIntExt(SrcVT
, RHSReg
, RetVT
, IsZExt
);
1308 return emitAddSub_rr(UseAdd
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
, RHSIsKill
,
1309 SetFlags
, WantResult
);
1312 unsigned AArch64FastISel::emitAddSub_rr(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
1313 bool LHSIsKill
, unsigned RHSReg
,
1314 bool RHSIsKill
, bool SetFlags
,
1316 assert(LHSReg
&& RHSReg
&& "Invalid register number.");
1318 if (LHSReg
== AArch64::SP
|| LHSReg
== AArch64::WSP
||
1319 RHSReg
== AArch64::SP
|| RHSReg
== AArch64::WSP
)
1322 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
1325 static const unsigned OpcTable
[2][2][2] = {
1326 { { AArch64::SUBWrr
, AArch64::SUBXrr
},
1327 { AArch64::ADDWrr
, AArch64::ADDXrr
} },
1328 { { AArch64::SUBSWrr
, AArch64::SUBSXrr
},
1329 { AArch64::ADDSWrr
, AArch64::ADDSXrr
} }
1331 bool Is64Bit
= RetVT
== MVT::i64
;
1332 unsigned Opc
= OpcTable
[SetFlags
][UseAdd
][Is64Bit
];
1333 const TargetRegisterClass
*RC
=
1334 Is64Bit
? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1337 ResultReg
= createResultReg(RC
);
1339 ResultReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
1341 const MCInstrDesc
&II
= TII
.get(Opc
);
1342 LHSReg
= constrainOperandRegClass(II
, LHSReg
, II
.getNumDefs());
1343 RHSReg
= constrainOperandRegClass(II
, RHSReg
, II
.getNumDefs() + 1);
1344 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, II
, ResultReg
)
1345 .addReg(LHSReg
, getKillRegState(LHSIsKill
))
1346 .addReg(RHSReg
, getKillRegState(RHSIsKill
));
1350 unsigned AArch64FastISel::emitAddSub_ri(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
1351 bool LHSIsKill
, uint64_t Imm
,
1352 bool SetFlags
, bool WantResult
) {
1353 assert(LHSReg
&& "Invalid register number.");
1355 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
1359 if (isUInt
<12>(Imm
))
1361 else if ((Imm
& 0xfff000) == Imm
) {
1367 static const unsigned OpcTable
[2][2][2] = {
1368 { { AArch64::SUBWri
, AArch64::SUBXri
},
1369 { AArch64::ADDWri
, AArch64::ADDXri
} },
1370 { { AArch64::SUBSWri
, AArch64::SUBSXri
},
1371 { AArch64::ADDSWri
, AArch64::ADDSXri
} }
1373 bool Is64Bit
= RetVT
== MVT::i64
;
1374 unsigned Opc
= OpcTable
[SetFlags
][UseAdd
][Is64Bit
];
1375 const TargetRegisterClass
*RC
;
1377 RC
= Is64Bit
? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1379 RC
= Is64Bit
? &AArch64::GPR64spRegClass
: &AArch64::GPR32spRegClass
;
1382 ResultReg
= createResultReg(RC
);
1384 ResultReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
1386 const MCInstrDesc
&II
= TII
.get(Opc
);
1387 LHSReg
= constrainOperandRegClass(II
, LHSReg
, II
.getNumDefs());
1388 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, II
, ResultReg
)
1389 .addReg(LHSReg
, getKillRegState(LHSIsKill
))
1391 .addImm(getShifterImm(AArch64_AM::LSL
, ShiftImm
));
1395 unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
1396 bool LHSIsKill
, unsigned RHSReg
,
1398 AArch64_AM::ShiftExtendType ShiftType
,
1399 uint64_t ShiftImm
, bool SetFlags
,
1401 assert(LHSReg
&& RHSReg
&& "Invalid register number.");
1402 assert(LHSReg
!= AArch64::SP
&& LHSReg
!= AArch64::WSP
&&
1403 RHSReg
!= AArch64::SP
&& RHSReg
!= AArch64::WSP
);
1405 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
1408 // Don't deal with undefined shifts.
1409 if (ShiftImm
>= RetVT
.getSizeInBits())
1412 static const unsigned OpcTable
[2][2][2] = {
1413 { { AArch64::SUBWrs
, AArch64::SUBXrs
},
1414 { AArch64::ADDWrs
, AArch64::ADDXrs
} },
1415 { { AArch64::SUBSWrs
, AArch64::SUBSXrs
},
1416 { AArch64::ADDSWrs
, AArch64::ADDSXrs
} }
1418 bool Is64Bit
= RetVT
== MVT::i64
;
1419 unsigned Opc
= OpcTable
[SetFlags
][UseAdd
][Is64Bit
];
1420 const TargetRegisterClass
*RC
=
1421 Is64Bit
? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1424 ResultReg
= createResultReg(RC
);
1426 ResultReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
1428 const MCInstrDesc
&II
= TII
.get(Opc
);
1429 LHSReg
= constrainOperandRegClass(II
, LHSReg
, II
.getNumDefs());
1430 RHSReg
= constrainOperandRegClass(II
, RHSReg
, II
.getNumDefs() + 1);
1431 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, II
, ResultReg
)
1432 .addReg(LHSReg
, getKillRegState(LHSIsKill
))
1433 .addReg(RHSReg
, getKillRegState(RHSIsKill
))
1434 .addImm(getShifterImm(ShiftType
, ShiftImm
));
1438 unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
1439 bool LHSIsKill
, unsigned RHSReg
,
1441 AArch64_AM::ShiftExtendType ExtType
,
1442 uint64_t ShiftImm
, bool SetFlags
,
1444 assert(LHSReg
&& RHSReg
&& "Invalid register number.");
1445 assert(LHSReg
!= AArch64::XZR
&& LHSReg
!= AArch64::WZR
&&
1446 RHSReg
!= AArch64::XZR
&& RHSReg
!= AArch64::WZR
);
1448 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
1454 static const unsigned OpcTable
[2][2][2] = {
1455 { { AArch64::SUBWrx
, AArch64::SUBXrx
},
1456 { AArch64::ADDWrx
, AArch64::ADDXrx
} },
1457 { { AArch64::SUBSWrx
, AArch64::SUBSXrx
},
1458 { AArch64::ADDSWrx
, AArch64::ADDSXrx
} }
1460 bool Is64Bit
= RetVT
== MVT::i64
;
1461 unsigned Opc
= OpcTable
[SetFlags
][UseAdd
][Is64Bit
];
1462 const TargetRegisterClass
*RC
= nullptr;
1464 RC
= Is64Bit
? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1466 RC
= Is64Bit
? &AArch64::GPR64spRegClass
: &AArch64::GPR32spRegClass
;
1469 ResultReg
= createResultReg(RC
);
1471 ResultReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
1473 const MCInstrDesc
&II
= TII
.get(Opc
);
1474 LHSReg
= constrainOperandRegClass(II
, LHSReg
, II
.getNumDefs());
1475 RHSReg
= constrainOperandRegClass(II
, RHSReg
, II
.getNumDefs() + 1);
1476 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, II
, ResultReg
)
1477 .addReg(LHSReg
, getKillRegState(LHSIsKill
))
1478 .addReg(RHSReg
, getKillRegState(RHSIsKill
))
1479 .addImm(getArithExtendImm(ExtType
, ShiftImm
));
1483 bool AArch64FastISel::emitCmp(const Value
*LHS
, const Value
*RHS
, bool IsZExt
) {
1484 Type
*Ty
= LHS
->getType();
1485 EVT EVT
= TLI
.getValueType(DL
, Ty
, true);
1486 if (!EVT
.isSimple())
1488 MVT VT
= EVT
.getSimpleVT();
1490 switch (VT
.SimpleTy
) {
1498 return emitICmp(VT
, LHS
, RHS
, IsZExt
);
1501 return emitFCmp(VT
, LHS
, RHS
);
1505 bool AArch64FastISel::emitICmp(MVT RetVT
, const Value
*LHS
, const Value
*RHS
,
1507 return emitSub(RetVT
, LHS
, RHS
, /*SetFlags=*/true, /*WantResult=*/false,
1511 bool AArch64FastISel::emitICmp_ri(MVT RetVT
, unsigned LHSReg
, bool LHSIsKill
,
1513 return emitAddSub_ri(/*UseAdd=*/false, RetVT
, LHSReg
, LHSIsKill
, Imm
,
1514 /*SetFlags=*/true, /*WantResult=*/false) != 0;
1517 bool AArch64FastISel::emitFCmp(MVT RetVT
, const Value
*LHS
, const Value
*RHS
) {
1518 if (RetVT
!= MVT::f32
&& RetVT
!= MVT::f64
)
1521 // Check to see if the 2nd operand is a constant that we can encode directly
1523 bool UseImm
= false;
1524 if (const auto *CFP
= dyn_cast
<ConstantFP
>(RHS
))
1525 if (CFP
->isZero() && !CFP
->isNegative())
1528 unsigned LHSReg
= getRegForValue(LHS
);
1531 bool LHSIsKill
= hasTrivialKill(LHS
);
1534 unsigned Opc
= (RetVT
== MVT::f64
) ? AArch64::FCMPDri
: AArch64::FCMPSri
;
1535 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(Opc
))
1536 .addReg(LHSReg
, getKillRegState(LHSIsKill
));
1540 unsigned RHSReg
= getRegForValue(RHS
);
1543 bool RHSIsKill
= hasTrivialKill(RHS
);
1545 unsigned Opc
= (RetVT
== MVT::f64
) ? AArch64::FCMPDrr
: AArch64::FCMPSrr
;
1546 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, DbgLoc
, TII
.get(Opc
))
1547 .addReg(LHSReg
, getKillRegState(LHSIsKill
))
1548 .addReg(RHSReg
, getKillRegState(RHSIsKill
));
1552 unsigned AArch64FastISel::emitAdd(MVT RetVT
, const Value
*LHS
, const Value
*RHS
,
1553 bool SetFlags
, bool WantResult
, bool IsZExt
) {
1554 return emitAddSub(/*UseAdd=*/true, RetVT
, LHS
, RHS
, SetFlags
, WantResult
,
1558 /// This method is a wrapper to simplify add emission.
1560 /// First try to emit an add with an immediate operand using emitAddSub_ri. If
1561 /// that fails, then try to materialize the immediate into a register and use
1562 /// emitAddSub_rr instead.
1563 unsigned AArch64FastISel::emitAdd_ri_(MVT VT
, unsigned Op0
, bool Op0IsKill
,
1567 ResultReg
= emitAddSub_ri(false, VT
, Op0
, Op0IsKill
, -Imm
);
1569 ResultReg
= emitAddSub_ri(true, VT
, Op0
, Op0IsKill
, Imm
);
1574 unsigned CReg
= fastEmit_i(VT
, VT
, ISD::Constant
, Imm
);
1578 ResultReg
= emitAddSub_rr(true, VT
, Op0
, Op0IsKill
, CReg
, true);
1582 unsigned AArch64FastISel::emitSub(MVT RetVT
, const Value
*LHS
, const Value
*RHS
,
1583 bool SetFlags
, bool WantResult
, bool IsZExt
) {
1584 return emitAddSub(/*UseAdd=*/false, RetVT
, LHS
, RHS
, SetFlags
, WantResult
,
1588 unsigned AArch64FastISel::emitSubs_rr(MVT RetVT
, unsigned LHSReg
,
1589 bool LHSIsKill
, unsigned RHSReg
,
1590 bool RHSIsKill
, bool WantResult
) {
1591 return emitAddSub_rr(/*UseAdd=*/false, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1592 RHSIsKill
, /*SetFlags=*/true, WantResult
);
1595 unsigned AArch64FastISel::emitSubs_rs(MVT RetVT
, unsigned LHSReg
,
1596 bool LHSIsKill
, unsigned RHSReg
,
1598 AArch64_AM::ShiftExtendType ShiftType
,
1599 uint64_t ShiftImm
, bool WantResult
) {
1600 return emitAddSub_rs(/*UseAdd=*/false, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1601 RHSIsKill
, ShiftType
, ShiftImm
, /*SetFlags=*/true,
1605 unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc
, MVT RetVT
,
1606 const Value
*LHS
, const Value
*RHS
) {
1607 // Canonicalize immediates to the RHS first.
1608 if (isa
<ConstantInt
>(LHS
) && !isa
<ConstantInt
>(RHS
))
1609 std::swap(LHS
, RHS
);
1611 // Canonicalize mul by power-of-2 to the RHS.
1612 if (LHS
->hasOneUse() && isValueAvailable(LHS
))
1613 if (isMulPowOf2(LHS
))
1614 std::swap(LHS
, RHS
);
1616 // Canonicalize shift immediate to the RHS.
1617 if (LHS
->hasOneUse() && isValueAvailable(LHS
))
1618 if (const auto *SI
= dyn_cast
<ShlOperator
>(LHS
))
1619 if (isa
<ConstantInt
>(SI
->getOperand(1)))
1620 std::swap(LHS
, RHS
);
1622 unsigned LHSReg
= getRegForValue(LHS
);
1625 bool LHSIsKill
= hasTrivialKill(LHS
);
1627 unsigned ResultReg
= 0;
1628 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
)) {
1629 uint64_t Imm
= C
->getZExtValue();
1630 ResultReg
= emitLogicalOp_ri(ISDOpc
, RetVT
, LHSReg
, LHSIsKill
, Imm
);
1635 // Check if the mul can be folded into the instruction.
1636 if (RHS
->hasOneUse() && isValueAvailable(RHS
)) {
1637 if (isMulPowOf2(RHS
)) {
1638 const Value
*MulLHS
= cast
<MulOperator
>(RHS
)->getOperand(0);
1639 const Value
*MulRHS
= cast
<MulOperator
>(RHS
)->getOperand(1);
1641 if (const auto *C
= dyn_cast
<ConstantInt
>(MulLHS
))
1642 if (C
->getValue().isPowerOf2())
1643 std::swap(MulLHS
, MulRHS
);
1645 assert(isa
<ConstantInt
>(MulRHS
) && "Expected a ConstantInt.");
1646 uint64_t ShiftVal
= cast
<ConstantInt
>(MulRHS
)->getValue().logBase2();
1648 unsigned RHSReg
= getRegForValue(MulLHS
);
1651 bool RHSIsKill
= hasTrivialKill(MulLHS
);
1652 ResultReg
= emitLogicalOp_rs(ISDOpc
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1653 RHSIsKill
, ShiftVal
);
1659 // Check if the shift can be folded into the instruction.
1660 if (RHS
->hasOneUse() && isValueAvailable(RHS
)) {
1661 if (const auto *SI
= dyn_cast
<ShlOperator
>(RHS
))
1662 if (const auto *C
= dyn_cast
<ConstantInt
>(SI
->getOperand(1))) {
1663 uint64_t ShiftVal
= C
->getZExtValue();
1664 unsigned RHSReg
= getRegForValue(SI
->getOperand(0));
1667 bool RHSIsKill
= hasTrivialKill(SI
->getOperand(0));
1668 ResultReg
= emitLogicalOp_rs(ISDOpc
, RetVT
, LHSReg
, LHSIsKill
, RHSReg
,
1669 RHSIsKill
, ShiftVal
);
1675 unsigned RHSReg
= getRegForValue(RHS
);
1678 bool RHSIsKill
= hasTrivialKill(RHS
);
1680 MVT VT
= std::max(MVT::i32
, RetVT
.SimpleTy
);
1681 ResultReg
= fastEmit_rr(VT
, VT
, ISDOpc
, LHSReg
, LHSIsKill
, RHSReg
, RHSIsKill
);
1682 if (RetVT
>= MVT::i8
&& RetVT
<= MVT::i16
) {
1683 uint64_t Mask
= (RetVT
== MVT::i8
) ? 0xff : 0xffff;
1684 ResultReg
= emitAnd_ri(MVT::i32
, ResultReg
, /*IsKill=*/true, Mask
);
1689 unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc
, MVT RetVT
,
1690 unsigned LHSReg
, bool LHSIsKill
,
1692 static_assert((ISD::AND
+ 1 == ISD::OR
) && (ISD::AND
+ 2 == ISD::XOR
),
1693 "ISD nodes are not consecutive!");
1694 static const unsigned OpcTable
[3][2] = {
1695 { AArch64::ANDWri
, AArch64::ANDXri
},
1696 { AArch64::ORRWri
, AArch64::ORRXri
},
1697 { AArch64::EORWri
, AArch64::EORXri
}
1699 const TargetRegisterClass
*RC
;
1702 switch (RetVT
.SimpleTy
) {
1709 unsigned Idx
= ISDOpc
- ISD::AND
;
1710 Opc
= OpcTable
[Idx
][0];
1711 RC
= &AArch64::GPR32spRegClass
;
1716 Opc
= OpcTable
[ISDOpc
- ISD::AND
][1];
1717 RC
= &AArch64::GPR64spRegClass
;
1722 if (!AArch64_AM::isLogicalImmediate(Imm
, RegSize
))
1725 unsigned ResultReg
=
1726 fastEmitInst_ri(Opc
, RC
, LHSReg
, LHSIsKill
,
1727 AArch64_AM::encodeLogicalImmediate(Imm
, RegSize
));
1728 if (RetVT
>= MVT::i8
&& RetVT
<= MVT::i16
&& ISDOpc
!= ISD::AND
) {
1729 uint64_t Mask
= (RetVT
== MVT::i8
) ? 0xff : 0xffff;
1730 ResultReg
= emitAnd_ri(MVT::i32
, ResultReg
, /*IsKill=*/true, Mask
);
1735 unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc
, MVT RetVT
,
1736 unsigned LHSReg
, bool LHSIsKill
,
1737 unsigned RHSReg
, bool RHSIsKill
,
1738 uint64_t ShiftImm
) {
1739 static_assert((ISD::AND
+ 1 == ISD::OR
) && (ISD::AND
+ 2 == ISD::XOR
),
1740 "ISD nodes are not consecutive!");
1741 static const unsigned OpcTable
[3][2] = {
1742 { AArch64::ANDWrs
, AArch64::ANDXrs
},
1743 { AArch64::ORRWrs
, AArch64::ORRXrs
},
1744 { AArch64::EORWrs
, AArch64::EORXrs
}
1747 // Don't deal with undefined shifts.
1748 if (ShiftImm
>= RetVT
.getSizeInBits())
1751 const TargetRegisterClass
*RC
;
1753 switch (RetVT
.SimpleTy
) {
1760 Opc
= OpcTable
[ISDOpc
- ISD::AND
][0];
1761 RC
= &AArch64::GPR32RegClass
;
1764 Opc
= OpcTable
[ISDOpc
- ISD::AND
][1];
1765 RC
= &AArch64::GPR64RegClass
;
1768 unsigned ResultReg
=
1769 fastEmitInst_rri(Opc
, RC
, LHSReg
, LHSIsKill
, RHSReg
, RHSIsKill
,
1770 AArch64_AM::getShifterImm(AArch64_AM::LSL
, ShiftImm
));
1771 if (RetVT
>= MVT::i8
&& RetVT
<= MVT::i16
) {
1772 uint64_t Mask
= (RetVT
== MVT::i8
) ? 0xff : 0xffff;
1773 ResultReg
= emitAnd_ri(MVT::i32
, ResultReg
, /*IsKill=*/true, Mask
);
1778 unsigned AArch64FastISel::emitAnd_ri(MVT RetVT
, unsigned LHSReg
, bool LHSIsKill
,
1780 return emitLogicalOp_ri(ISD::AND
, RetVT
, LHSReg
, LHSIsKill
, Imm
);
unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
                                   bool WantZExt, MachineMemOperand *MMO) {
  if (!TLI.allowsMisalignedMemoryAccesses(VT))

  // Simplify this down to something we can handle.
  if (!simplifyAddress(Addr, VT))

  unsigned ScaleFactor = getImplicitScaleFactor(VT);
    llvm_unreachable("Unexpected value type.");

  // Negative offsets require unscaled, 9-bit, signed immediate offsets.
  // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
  bool UseScaled = true;
  if ((Addr.getOffset() < 0) || (Addr.getOffset() & (ScaleFactor - 1))) {

  static const unsigned GPOpcTable[2][8][4] = {
    { { AArch64::LDURSBWi,  AArch64::LDURSHWi,  AArch64::LDURWi,
      { AArch64::LDURSBXi,  AArch64::LDURSHXi,  AArch64::LDURSWi,
      { AArch64::LDRSBWui,  AArch64::LDRSHWui,  AArch64::LDRWui,
      { AArch64::LDRSBXui,  AArch64::LDRSHXui,  AArch64::LDRSWui,
      { AArch64::LDRSBWroX, AArch64::LDRSHWroX, AArch64::LDRWroX,
      { AArch64::LDRSBXroX, AArch64::LDRSHXroX, AArch64::LDRSWroX,
      { AArch64::LDRSBWroW, AArch64::LDRSHWroW, AArch64::LDRWroW,
      { AArch64::LDRSBXroW, AArch64::LDRSHXroW, AArch64::LDRSWroW,
    { { AArch64::LDURBBi,   AArch64::LDURHHi,   AArch64::LDURWi,
      { AArch64::LDURBBi,   AArch64::LDURHHi,   AArch64::LDURWi,
      { AArch64::LDRBBui,   AArch64::LDRHHui,   AArch64::LDRWui,
      { AArch64::LDRBBui,   AArch64::LDRHHui,   AArch64::LDRWui,
      { AArch64::LDRBBroX,  AArch64::LDRHHroX,  AArch64::LDRWroX,
      { AArch64::LDRBBroX,  AArch64::LDRHHroX,  AArch64::LDRWroX,
      { AArch64::LDRBBroW,  AArch64::LDRHHroW,  AArch64::LDRWroW,
      { AArch64::LDRBBroW,  AArch64::LDRHHroW,  AArch64::LDRWroW,

  static const unsigned FPOpcTable[4][2] = {
    { AArch64::LDURSi,  AArch64::LDURDi },
    { AArch64::LDRSui,  AArch64::LDRDui },
    { AArch64::LDRSroX, AArch64::LDRDroX },
    { AArch64::LDRSroW, AArch64::LDRDroW }

  const TargetRegisterClass *RC;
  bool UseRegOffset = Addr.isRegBase() && !Addr.getOffset() && Addr.getReg() &&
                      Addr.getOffsetReg();
  unsigned Idx = UseRegOffset ? 2 : UseScaled ? 1 : 0;
  if (Addr.getExtendType() == AArch64_AM::UXTW ||
      Addr.getExtendType() == AArch64_AM::SXTW)

  bool IsRet64Bit = RetVT == MVT::i64;
  switch (VT.SimpleTy) {
    llvm_unreachable("Unexpected value type.");
  case MVT::i1: // Intentional fall-through.
    Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][0];
    RC = (IsRet64Bit && !WantZExt) ?
             &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
    Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][1];
    RC = (IsRet64Bit && !WantZExt) ?
             &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
    Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][2];
    RC = (IsRet64Bit && !WantZExt) ?
             &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
    Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][3];
    RC = &AArch64::GPR64RegClass;
    Opc = FPOpcTable[Idx][0];
    RC = &AArch64::FPR32RegClass;
    Opc = FPOpcTable[Idx][1];
    RC = &AArch64::FPR64RegClass;

  // Create the base instruction, then add the operands.
  unsigned ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg);
  addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, ScaleFactor, MMO);

  // Loading an i1 requires special handling.
  if (VT == MVT::i1) {
    unsigned ANDReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, 1);
    assert(ANDReg && "Unexpected AND instruction emission failure.");

  // For zero-extending loads to 64bit we emit a 32bit load and then convert
  // the 32bit reg to a 64bit reg.
  if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) {
    unsigned Reg64 = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), Reg64)
        .addReg(ResultReg, getKillRegState(true))
        .addImm(AArch64::sub_32);
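// Illustrative sketch (assumed registers): a zero-extending i8 load with
// RetVT == MVT::i64 comes out roughly as
//   ldrb w8, [x0]
//   ; SUBREG_TO_REG then re-labels w8 as the sub_32 lane of an x-register;
//   ; the 32-bit load already cleared bits 32-63, so no extra instruction
//   ; is needed for the extension.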
bool AArch64FastISel::selectAddSub(const Instruction *I) {
  if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
    return selectOperator(I, I->getOpcode());

  switch (I->getOpcode()) {
    llvm_unreachable("Unexpected instruction.");
  case Instruction::Add:
    ResultReg = emitAdd(VT, I->getOperand(0), I->getOperand(1));
  case Instruction::Sub:
    ResultReg = emitSub(VT, I->getOperand(0), I->getOperand(1));
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectLogicalOp(const Instruction *I) {
  if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
    return selectOperator(I, I->getOpcode());

  switch (I->getOpcode()) {
    llvm_unreachable("Unexpected instruction.");
  case Instruction::And:
    ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
  case Instruction::Or:
    ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
  case Instruction::Xor:
    ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectLoad(const Instruction *I) {
  // Verify we have a legal type before going any further. Currently, we handle
  // simple types that will directly fit in a register (i32/f32/i64/f64) or
  // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
  if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true) ||
      cast<LoadInst>(I)->isAtomic())

  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())

  // See if we can handle this address.
  if (!computeAddress(I->getOperand(0), Addr, I->getType()))

  // Fold the following sign-/zero-extend into the load instruction.
  bool WantZExt = true;
  const Value *IntExtVal = nullptr;
  if (I->hasOneUse()) {
    if (const auto *ZE = dyn_cast<ZExtInst>(I->use_begin()->getUser())) {
      if (isTypeSupported(ZE->getType(), RetVT))
    } else if (const auto *SE = dyn_cast<SExtInst>(I->use_begin()->getUser())) {
      if (isTypeSupported(SE->getType(), RetVT))

  unsigned ResultReg =
      emitLoad(VT, RetVT, Addr, WantZExt, createMachineMemOperandFor(I));

  // There are a few different cases we have to handle, because the load or the
  // sign-/zero-extend might not be selected by FastISel if we fall-back to
  // SelectionDAG. There is also an ordering issue when both instructions are in
  // different basic blocks.
  // 1.) The load instruction is selected by FastISel, but the integer extend
  //     not. This usually happens when the integer extend is in a different
  //     basic block and SelectionDAG took over for that basic block.
  // 2.) The load instruction is selected before the integer extend. This only
  //     happens when the integer extend is in a different basic block.
  // 3.) The load instruction is selected by SelectionDAG and the integer extend
  //     by FastISel. This happens if there are instructions between the load
  //     and the integer extend that couldn't be selected by FastISel.

  // The integer extend hasn't been emitted yet. FastISel or SelectionDAG
  // could select it. Emit a copy to subreg if necessary. FastISel will remove
  // it when it selects the integer extend.
  unsigned Reg = lookUpRegForValue(IntExtVal);
  auto *MI = MRI.getUniqueVRegDef(Reg);
    if (RetVT == MVT::i64 && VT <= MVT::i32) {
      // Delete the last emitted instruction from emitLoad (SUBREG_TO_REG).
      MachineBasicBlock::iterator I(std::prev(FuncInfo.InsertPt));
      ResultReg = std::prev(I)->getOperand(0).getReg();
      removeDeadCode(I, std::next(I));
      ResultReg = fastEmitInst_extractsubreg(MVT::i32, ResultReg,
    updateValueMap(I, ResultReg);

  // The integer extend has already been emitted - delete all the instructions
  // that have been emitted by the integer extend lowering code and use the
  // result from the load instruction directly.
    for (auto &Opnd : MI->uses()) {
      Reg = Opnd.getReg();
    MachineBasicBlock::iterator I(MI);
    removeDeadCode(I, std::next(I));
    MI = MRI.getUniqueVRegDef(Reg);
  updateValueMap(IntExtVal, ResultReg);

  updateValueMap(I, ResultReg);
bool AArch64FastISel::emitStoreRelease(MVT VT, unsigned SrcReg,
                                       MachineMemOperand *MMO) {
  switch (VT.SimpleTy) {
  default: return false;
  case MVT::i8:  Opc = AArch64::STLRB; break;
  case MVT::i16: Opc = AArch64::STLRH; break;
  case MVT::i32: Opc = AArch64::STLRW; break;
  case MVT::i64: Opc = AArch64::STLRX; break;

  const MCInstrDesc &II = TII.get(Opc);
  SrcReg = constrainOperandRegClass(II, SrcReg, 0);
  AddrReg = constrainOperandRegClass(II, AddrReg, 1);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addMemOperand(MMO);
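// Illustrative sketch (assumed registers): a release store such as
//   store atomic i32 %v, i32* %p release, align 4
// is emitted as a store-release using only a base register, roughly
//   stlr w1, [x0]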
bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
                                MachineMemOperand *MMO) {
  if (!TLI.allowsMisalignedMemoryAccesses(VT))

  // Simplify this down to something we can handle.
  if (!simplifyAddress(Addr, VT))

  unsigned ScaleFactor = getImplicitScaleFactor(VT);
    llvm_unreachable("Unexpected value type.");

  // Negative offsets require unscaled, 9-bit, signed immediate offsets.
  // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
  bool UseScaled = true;
  if ((Addr.getOffset() < 0) || (Addr.getOffset() & (ScaleFactor - 1))) {

  static const unsigned OpcTable[4][6] = {
    { AArch64::STURBBi,  AArch64::STURHHi,  AArch64::STURWi,  AArch64::STURXi,
      AArch64::STURSi,   AArch64::STURDi },
    { AArch64::STRBBui,  AArch64::STRHHui,  AArch64::STRWui,  AArch64::STRXui,
      AArch64::STRSui,   AArch64::STRDui },
    { AArch64::STRBBroX, AArch64::STRHHroX, AArch64::STRWroX, AArch64::STRXroX,
      AArch64::STRSroX,  AArch64::STRDroX },
    { AArch64::STRBBroW, AArch64::STRHHroW, AArch64::STRWroW, AArch64::STRXroW,
      AArch64::STRSroW,  AArch64::STRDroW }

  bool VTIsi1 = false;
  bool UseRegOffset = Addr.isRegBase() && !Addr.getOffset() && Addr.getReg() &&
                      Addr.getOffsetReg();
  unsigned Idx = UseRegOffset ? 2 : UseScaled ? 1 : 0;
  if (Addr.getExtendType() == AArch64_AM::UXTW ||
      Addr.getExtendType() == AArch64_AM::SXTW)

  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected value type.");
  case MVT::i1:  VTIsi1 = true; LLVM_FALLTHROUGH;
  case MVT::i8:  Opc = OpcTable[Idx][0]; break;
  case MVT::i16: Opc = OpcTable[Idx][1]; break;
  case MVT::i32: Opc = OpcTable[Idx][2]; break;
  case MVT::i64: Opc = OpcTable[Idx][3]; break;
  case MVT::f32: Opc = OpcTable[Idx][4]; break;
  case MVT::f64: Opc = OpcTable[Idx][5]; break;

  // Storing an i1 requires special handling.
  if (VTIsi1 && SrcReg != AArch64::WZR) {
    unsigned ANDReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1);
    assert(ANDReg && "Unexpected AND instruction emission failure.");

  // Create the base instruction, then add the operands.
  const MCInstrDesc &II = TII.get(Opc);
  SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(SrcReg);
  addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, ScaleFactor, MMO);
bool AArch64FastISel::selectStore(const Instruction *I) {
  const Value *Op0 = I->getOperand(0);
  // Verify we have a legal type before going any further. Currently, we handle
  // simple types that will directly fit in a register (i32/f32/i64/f64) or
  // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
  if (!isTypeSupported(Op0->getType(), VT, /*IsVectorAllowed=*/true))

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())

  // Get the value to be stored into a register. Use the zero register directly
  // when possible to avoid an unnecessary copy and a wasted register.
  unsigned SrcReg = 0;
  if (const auto *CI = dyn_cast<ConstantInt>(Op0)) {
      SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
  } else if (const auto *CF = dyn_cast<ConstantFP>(Op0)) {
    if (CF->isZero() && !CF->isNegative()) {
      VT = MVT::getIntegerVT(VT.getSizeInBits());
      SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
    SrcReg = getRegForValue(Op0);

  auto *SI = cast<StoreInst>(I);

  // Try to emit a STLR for seq_cst/release.
  if (SI->isAtomic()) {
    AtomicOrdering Ord = SI->getOrdering();
    // The non-atomic instructions are sufficient for relaxed stores.
    if (isReleaseOrStronger(Ord)) {
      // The STLR addressing mode only supports a base reg; pass that directly.
      unsigned AddrReg = getRegForValue(PtrV);
      return emitStoreRelease(VT, SrcReg, AddrReg,
                              createMachineMemOperandFor(I));

  // See if we can handle this address.
  if (!computeAddress(PtrV, Addr, Op0->getType()))

  if (!emitStore(VT, SrcReg, Addr, createMachineMemOperandFor(I)))
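// Illustrative sketch (assumed registers): constant zeroes reuse the zero
// register instead of materializing 0 in a scratch register, e.g.
//   store i64 0, i64* %p      ->   str xzr, [x0]
// and a positive floating-point zero is re-typed as an integer store of
// WZR/XZR of the same width.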
static AArch64CC::CondCode getCompareCC(CmpInst::Predicate Pred) {
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
    // AL is our "false" for now. The other two need more compares.
    return AArch64CC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return AArch64CC::HI;
  case CmpInst::FCMP_OLT:
    return AArch64CC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return AArch64CC::LS;
  case CmpInst::FCMP_ORD:
    return AArch64CC::VC;
  case CmpInst::FCMP_UNO:
    return AArch64CC::VS;
  case CmpInst::FCMP_UGE:
    return AArch64CC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return AArch64CC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
/// Try to emit a combined compare-and-branch instruction.
bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
  // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
  // will not be produced, as they are conditional branch instructions that do
  if (FuncInfo.MF->getFunction().hasFnAttribute(
          Attribute::SpeculativeLoadHardening))

  assert(isa<CmpInst>(BI->getCondition()) && "Expected cmp instruction");
  const CmpInst *CI = cast<CmpInst>(BI->getCondition());
  CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);

  const Value *LHS = CI->getOperand(0);
  const Value *RHS = CI->getOperand(1);

  if (!isTypeSupported(LHS->getType(), VT))

  unsigned BW = VT.getSizeInBits();

  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Try to take advantage of fallthrough opportunities.
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    Predicate = CmpInst::getInversePredicate(Predicate);

  switch (Predicate) {
  case CmpInst::ICMP_EQ:
  case CmpInst::ICMP_NE:
    if (isa<Constant>(LHS) && cast<Constant>(LHS)->isNullValue())
      std::swap(LHS, RHS);

    if (!isa<Constant>(RHS) || !cast<Constant>(RHS)->isNullValue())

    if (const auto *AI = dyn_cast<BinaryOperator>(LHS))
      if (AI->getOpcode() == Instruction::And && isValueAvailable(AI)) {
        const Value *AndLHS = AI->getOperand(0);
        const Value *AndRHS = AI->getOperand(1);

        if (const auto *C = dyn_cast<ConstantInt>(AndLHS))
          if (C->getValue().isPowerOf2())
            std::swap(AndLHS, AndRHS);

        if (const auto *C = dyn_cast<ConstantInt>(AndRHS))
          if (C->getValue().isPowerOf2()) {
            TestBit = C->getValue().logBase2();

    IsCmpNE = Predicate == CmpInst::ICMP_NE;
  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SGE:
    if (!isa<Constant>(RHS) || !cast<Constant>(RHS)->isNullValue())

    IsCmpNE = Predicate == CmpInst::ICMP_SLT;
  case CmpInst::ICMP_SGT:
  case CmpInst::ICMP_SLE:
    if (!isa<ConstantInt>(RHS))

    if (cast<ConstantInt>(RHS)->getValue() != APInt(BW, -1, true))

    IsCmpNE = Predicate == CmpInst::ICMP_SLE;

  static const unsigned OpcTable[2][2][2] = {
    { {AArch64::CBZW,  AArch64::CBZX },
      {AArch64::CBNZW, AArch64::CBNZX} },
    { {AArch64::TBZW,  AArch64::TBZX },
      {AArch64::TBNZW, AArch64::TBNZX} }

  bool IsBitTest = TestBit != -1;
  bool Is64Bit = BW == 64;
  if (TestBit < 32 && TestBit >= 0)

  unsigned Opc = OpcTable[IsBitTest][IsCmpNE][Is64Bit];
  const MCInstrDesc &II = TII.get(Opc);

  unsigned SrcReg = getRegForValue(LHS);
  bool SrcIsKill = hasTrivialKill(LHS);

  if (BW == 64 && !Is64Bit)
    SrcReg = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,

  if ((BW < 32) && !IsBitTest)
    SrcReg = emitIntExt(VT, SrcReg, MVT::i32, /*isZExt=*/true);

  // Emit the combined compare and branch instruction.
  SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
          .addReg(SrcReg, getKillRegState(SrcIsKill));
    MIB.addImm(TestBit);
  finishCondBranch(BI->getParent(), TBB, FBB);
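// Illustrative sketch (assumed registers and labels): a power-of-two mask
// compared against zero becomes a test-bit branch, e.g.
//   %a = and i32 %x, 4
//   %c = icmp ne i32 %a, 0
//   br i1 %c, label %t, label %f
// lowers roughly to
//   tbnz w0, #2, .LBB_t
// while a plain compare of the whole value against zero uses cbz/cbnz.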
bool AArch64FastISel::selectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  if (BI->isUnconditional()) {
    MachineBasicBlock *MSucc = FuncInfo.MBBMap[BI->getSuccessor(0)];
    fastEmitBranch(MSucc, BI->getDebugLoc());

  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && isValueAvailable(CI)) {
      // Try to optimize or fold the cmp.
      CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
      switch (Predicate) {
      case CmpInst::FCMP_FALSE:
        fastEmitBranch(FBB, DbgLoc);
      case CmpInst::FCMP_TRUE:
        fastEmitBranch(TBB, DbgLoc);

      // Try to emit a combined compare-and-branch first.
      if (emitCompareAndBranch(BI))

      // Try to take advantage of fallthrough opportunities.
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);

      if (!emitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))

      // FCMP_UEQ and FCMP_ONE cannot be checked with a single branch
      AArch64CC::CondCode CC = getCompareCC(Predicate);
      AArch64CC::CondCode ExtraCC = AArch64CC::AL;
      switch (Predicate) {
      case CmpInst::FCMP_UEQ:
        ExtraCC = AArch64CC::EQ;
      case CmpInst::FCMP_ONE:
        ExtraCC = AArch64CC::MI;
      assert((CC != AArch64CC::AL) && "Unexpected condition code.");

      // Emit the extra branch for FCMP_UEQ and FCMP_ONE.
      if (ExtraCC != AArch64CC::AL) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))

      finishCondBranch(BI->getParent(), TBB, FBB);
  } else if (const auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::B))

    // Obtain the branch probability and add the target to the successor list.
      auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
          BI->getParent(), Target->getBasicBlock());
      FuncInfo.MBB->addSuccessor(Target, BranchProbability);
      FuncInfo.MBB->addSuccessorWithoutProb(Target);

  AArch64CC::CondCode CC = AArch64CC::NE;
  if (foldXALUIntrinsic(CC, I, BI->getCondition())) {
    // Fake request the condition, otherwise the intrinsic might be completely
    unsigned CondReg = getRegForValue(BI->getCondition());

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc))

    finishCondBranch(BI->getParent(), TBB, FBB);

  unsigned CondReg = getRegForValue(BI->getCondition());
  bool CondRegIsKill = hasTrivialKill(BI->getCondition());

  // i1 conditions come as i32 values, test the lowest bit with tb(n)z.
  unsigned Opcode = AArch64::TBNZW;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    Opcode = AArch64::TBZW;

  const MCInstrDesc &II = TII.get(Opcode);
  unsigned ConstrainedCondReg
    = constrainOperandRegClass(II, CondReg, II.getNumDefs());
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(ConstrainedCondReg, getKillRegState(CondRegIsKill))

  finishCondBranch(BI->getParent(), TBB, FBB);
bool AArch64FastISel::selectIndirectBr(const Instruction *I) {
  const IndirectBrInst *BI = cast<IndirectBrInst>(I);
  unsigned AddrReg = getRegForValue(BI->getOperand(0));

  // Emit the indirect branch.
  const MCInstrDesc &II = TII.get(AArch64::BR);
  AddrReg = constrainOperandRegClass(II, AddrReg, II.getNumDefs());
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(AddrReg);

  // Make sure the CFG is up-to-date.
  for (auto *Succ : BI->successors())
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[Succ]);
bool AArch64FastISel::selectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Vectors of i1 are weird: bail out.
  if (CI->getType()->isVectorTy())

  // Try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
  unsigned ResultReg = 0;
  switch (Predicate) {
  case CmpInst::FCMP_FALSE:
    ResultReg = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(AArch64::WZR, getKillRegState(true));
  case CmpInst::FCMP_TRUE:
    ResultReg = fastEmit_i(MVT::i32, MVT::i32, ISD::Constant, 1);

    updateValueMap(I, ResultReg);

  if (!emitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))

  ResultReg = createResultReg(&AArch64::GPR32RegClass);

  // FCMP_UEQ and FCMP_ONE cannot be checked with a single instruction. These
  // condition codes are inverted, because they are used by CSINC.
  static unsigned CondCodeTable[2][2] = {
    { AArch64CC::NE, AArch64CC::VC },
    { AArch64CC::PL, AArch64CC::LE }
  unsigned *CondCodes = nullptr;
  switch (Predicate) {
  case CmpInst::FCMP_UEQ:
    CondCodes = &CondCodeTable[0][0];
  case CmpInst::FCMP_ONE:
    CondCodes = &CondCodeTable[1][0];

    unsigned TmpReg1 = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
        .addReg(AArch64::WZR, getKillRegState(true))
        .addReg(AArch64::WZR, getKillRegState(true))
        .addImm(CondCodes[0]);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
        .addReg(TmpReg1, getKillRegState(true))
        .addReg(AArch64::WZR, getKillRegState(true))
        .addImm(CondCodes[1]);

    updateValueMap(I, ResultReg);

  // Now set a register based on the comparison.
  AArch64CC::CondCode CC = getCompareCC(Predicate);
  assert((CC != AArch64CC::AL) && "Unexpected condition code.");
  AArch64CC::CondCode invertedCC = getInvertedCondCode(CC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
      .addReg(AArch64::WZR, getKillRegState(true))
      .addReg(AArch64::WZR, getKillRegState(true))
      .addImm(invertedCC);

  updateValueMap(I, ResultReg);
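// Illustrative sketch (assumed registers): the common case materializes the
// predicate with a single CSINC of the inverted condition, e.g. for icmp ult
//   cmp   w0, w1
//   csinc w8, wzr, wzr, hs       ; hs is the inverse of lo
// and FCMP_UEQ needs the two chained CSINCs from CondCodeTable, roughly
//   fcmp  s0, s1
//   csinc w8, wzr, wzr, ne       ; 1 if equal
//   csinc w9, w8, wzr, vc        ; 1 if unordered, otherwise keep w8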
/// Optimize selects of i1 if one of the operands has a 'true' or 'false'
bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {
  if (!SI->getType()->isIntegerTy(1))

  const Value *Src1Val, *Src2Val;
  bool NeedExtraOp = false;
  if (auto *CI = dyn_cast<ConstantInt>(SI->getTrueValue())) {
      Src1Val = SI->getCondition();
      Src2Val = SI->getFalseValue();
      Opc = AArch64::ORRWrr;
      assert(CI->isZero());
      Src1Val = SI->getFalseValue();
      Src2Val = SI->getCondition();
      Opc = AArch64::BICWrr;
  } else if (auto *CI = dyn_cast<ConstantInt>(SI->getFalseValue())) {
      Src1Val = SI->getCondition();
      Src2Val = SI->getTrueValue();
      Opc = AArch64::ORRWrr;
      assert(CI->isZero());
      Src1Val = SI->getCondition();
      Src2Val = SI->getTrueValue();
      Opc = AArch64::ANDWrr;

  unsigned Src1Reg = getRegForValue(Src1Val);
  bool Src1IsKill = hasTrivialKill(Src1Val);

  unsigned Src2Reg = getRegForValue(Src2Val);
  bool Src2IsKill = hasTrivialKill(Src2Val);

    Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, Src1IsKill, 1);
  unsigned ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, Src1Reg,
                                       Src1IsKill, Src2Reg, Src2IsKill);
  updateValueMap(SI, ResultReg);
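// Illustrative sketch (assumed registers): with a constant true/false arm the
// select degenerates into a single logical instruction, e.g.
//   select i1 %c, i1 true,  i1 %b   ->   orr w0, wC, wB
//   select i1 %c, i1 false, i1 %b   ->   bic w0, wB, wC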
bool AArch64FastISel::selectSelect(const Instruction *I) {
  assert(isa<SelectInst>(I) && "Expected a select instruction.");
  if (!isTypeSupported(I->getType(), VT))

  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
    Opc = AArch64::CSELWr;
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELXr;
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::FCSELSrrr;
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELDrrr;
    RC = &AArch64::FPR64RegClass;

  const SelectInst *SI = cast<SelectInst>(I);
  const Value *Cond = SI->getCondition();
  AArch64CC::CondCode CC = AArch64CC::NE;
  AArch64CC::CondCode ExtraCC = AArch64CC::AL;

  if (optimizeSelect(SI))

  // Try to pickup the flags, so we don't have to emit another compare.
  if (foldXALUIntrinsic(CC, I, Cond)) {
    // Fake request the condition to force emission of the XALU intrinsic.
    unsigned CondReg = getRegForValue(Cond);
  } else if (isa<CmpInst>(Cond) && cast<CmpInst>(Cond)->hasOneUse() &&
             isValueAvailable(Cond)) {
    const auto *Cmp = cast<CmpInst>(Cond);
    // Try to optimize or fold the cmp.
    CmpInst::Predicate Predicate = optimizeCmpPredicate(Cmp);
    const Value *FoldSelect = nullptr;
    switch (Predicate) {
    case CmpInst::FCMP_FALSE:
      FoldSelect = SI->getFalseValue();
    case CmpInst::FCMP_TRUE:
      FoldSelect = SI->getTrueValue();

      unsigned SrcReg = getRegForValue(FoldSelect);
      unsigned UseReg = lookUpRegForValue(SI);
        MRI.clearKillFlags(UseReg);
      updateValueMap(I, SrcReg);

    if (!emitCmp(Cmp->getOperand(0), Cmp->getOperand(1), Cmp->isUnsigned()))

    // FCMP_UEQ and FCMP_ONE cannot be checked with a single select instruction.
    CC = getCompareCC(Predicate);
    switch (Predicate) {
    case CmpInst::FCMP_UEQ:
      ExtraCC = AArch64CC::EQ;
    case CmpInst::FCMP_ONE:
      ExtraCC = AArch64CC::MI;
    assert((CC != AArch64CC::AL) && "Unexpected condition code.");

    unsigned CondReg = getRegForValue(Cond);
    bool CondIsKill = hasTrivialKill(Cond);

    const MCInstrDesc &II = TII.get(AArch64::ANDSWri);
    CondReg = constrainOperandRegClass(II, CondReg, 1);

    // Emit a TST instruction (ANDS wzr, reg, #imm).
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
        .addReg(CondReg, getKillRegState(CondIsKill))
        .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));

  unsigned Src1Reg = getRegForValue(SI->getTrueValue());
  bool Src1IsKill = hasTrivialKill(SI->getTrueValue());

  unsigned Src2Reg = getRegForValue(SI->getFalseValue());
  bool Src2IsKill = hasTrivialKill(SI->getFalseValue());

  if (!Src1Reg || !Src2Reg)

  if (ExtraCC != AArch64CC::AL) {
    Src2Reg = fastEmitInst_rri(Opc, RC, Src1Reg, Src1IsKill, Src2Reg,
                               Src2IsKill, ExtraCC);
  unsigned ResultReg = fastEmitInst_rri(Opc, RC, Src1Reg, Src1IsKill, Src2Reg,
  updateValueMap(I, ResultReg);
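// Illustrative sketch (assumed registers): when the condition is not a
// foldable compare, the generic path tests bit 0 and then selects, roughly
//   tst  w0, #0x1
//   csel w8, w1, w2, ne          ; or fcsel s8, s1, s2, ne for FP types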
bool AArch64FastISel::selectFPExt(const Instruction *I) {
  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy())

  unsigned Op = getRegForValue(V);

  unsigned ResultReg = createResultReg(&AArch64::FPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
          ResultReg).addReg(Op);
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectFPTrunc(const Instruction *I) {
  Value *V = I->getOperand(0);
  if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy())

  unsigned Op = getRegForValue(V);

  unsigned ResultReg = createResultReg(&AArch64::FPR32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
          ResultReg).addReg(Op);
  updateValueMap(I, ResultReg);
// FPToUI and FPToSI
bool AArch64FastISel::selectFPToInt(const Instruction *I, bool Signed) {
  if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())

  unsigned SrcReg = getRegForValue(I->getOperand(0));

  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);
  if (SrcVT == MVT::f128 || SrcVT == MVT::f16)

  if (SrcVT == MVT::f64) {
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWDr : AArch64::FCVTZSUXDr;
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWDr : AArch64::FCVTZUUXDr;
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWSr : AArch64::FCVTZSUXSr;
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr;
  unsigned ResultReg = createResultReg(
      DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
  if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
  // Let regular ISEL handle FP16
  if (DestVT == MVT::f16)

  assert((DestVT == MVT::f32 || DestVT == MVT::f64) &&
         "Unexpected value type.");

  unsigned SrcReg = getRegForValue(I->getOperand(0));
  bool SrcIsKill = hasTrivialKill(I->getOperand(0));

  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) {
        emitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed);

  if (SrcVT == MVT::i64) {
      Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUXSri : AArch64::SCVTFUXDri;
      Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUXSri : AArch64::UCVTFUXDri;
      Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUWSri : AArch64::SCVTFUWDri;
      Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;

  unsigned ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
  updateValueMap(I, ResultReg);
bool AArch64FastISel::fastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)

  const Function *F = FuncInfo.Fn;

  CallingConv::ID CC = F->getCallingConv();
  if (CC != CallingConv::C && CC != CallingConv::Swift)

  if (Subtarget->hasCustomCallingConv())

  // Only handle simple cases of up to 8 GPR and FPR each.
  unsigned GPRCnt = 0;
  unsigned FPRCnt = 0;
  for (auto const &Arg : F->args()) {
    if (Arg.hasAttribute(Attribute::ByVal) ||
        Arg.hasAttribute(Attribute::InReg) ||
        Arg.hasAttribute(Attribute::StructRet) ||
        Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))

    Type *ArgTy = Arg.getType();
    if (ArgTy->isStructTy() || ArgTy->isArrayTy())

    EVT ArgVT = TLI.getValueType(DL, ArgTy);
    if (!ArgVT.isSimple())

    MVT VT = ArgVT.getSimpleVT().SimpleTy;
    if (VT.isFloatingPoint() && !Subtarget->hasFPARMv8())

    if (VT.isVector() &&
        (!Subtarget->hasNEON() || !Subtarget->isLittleEndian()))

    if (VT >= MVT::i1 && VT <= MVT::i64)
    else if ((VT >= MVT::f16 && VT <= MVT::f64) || VT.is64BitVector() ||
             VT.is128BitVector())

  if (GPRCnt > 8 || FPRCnt > 8)

  static const MCPhysReg Registers[6][8] = {
    { AArch64::W0, AArch64::W1, AArch64::W2, AArch64::W3, AArch64::W4,
      AArch64::W5, AArch64::W6, AArch64::W7 },
    { AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3, AArch64::X4,
      AArch64::X5, AArch64::X6, AArch64::X7 },
    { AArch64::H0, AArch64::H1, AArch64::H2, AArch64::H3, AArch64::H4,
      AArch64::H5, AArch64::H6, AArch64::H7 },
    { AArch64::S0, AArch64::S1, AArch64::S2, AArch64::S3, AArch64::S4,
      AArch64::S5, AArch64::S6, AArch64::S7 },
    { AArch64::D0, AArch64::D1, AArch64::D2, AArch64::D3, AArch64::D4,
      AArch64::D5, AArch64::D6, AArch64::D7 },
    { AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4,
      AArch64::Q5, AArch64::Q6, AArch64::Q7 }

  unsigned GPRIdx = 0;
  unsigned FPRIdx = 0;
  for (auto const &Arg : F->args()) {
    MVT VT = TLI.getSimpleValueType(DL, Arg.getType());
    const TargetRegisterClass *RC;
    if (VT >= MVT::i1 && VT <= MVT::i32) {
      SrcReg = Registers[0][GPRIdx++];
      RC = &AArch64::GPR32RegClass;
    } else if (VT == MVT::i64) {
      SrcReg = Registers[1][GPRIdx++];
      RC = &AArch64::GPR64RegClass;
    } else if (VT == MVT::f16) {
      SrcReg = Registers[2][FPRIdx++];
      RC = &AArch64::FPR16RegClass;
    } else if (VT == MVT::f32) {
      SrcReg = Registers[3][FPRIdx++];
      RC = &AArch64::FPR32RegClass;
    } else if ((VT == MVT::f64) || VT.is64BitVector()) {
      SrcReg = Registers[4][FPRIdx++];
      RC = &AArch64::FPR64RegClass;
    } else if (VT.is128BitVector()) {
      SrcReg = Registers[5][FPRIdx++];
      RC = &AArch64::FPR128RegClass;
      llvm_unreachable("Unexpected value type.");

    unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
    // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
    // Without this, EmitLiveInCopies may eliminate the livein if its only
    // use is a bitcast (which isn't turned into an instruction).
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(DstReg, getKillRegState(true));
    updateValueMap(&Arg, ResultReg);
bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
                                      SmallVectorImpl<MVT> &OutVTs,
                                      unsigned &NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
      .addImm(NumBytes).addImm(0);

  // Process the args.
  for (CCValAssign &VA : ArgLocs) {
    const Value *ArgVal = CLI.OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];

    unsigned ArgReg = getRegForValue(ArgVal);

    // Handle arg promotion: SExt, ZExt, AExt.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
    case CCValAssign::SExt: {
      MVT DestVT = VA.getLocVT();
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
    case CCValAssign::AExt:
      // Intentional fall-through.
    case CCValAssign::ZExt: {
      MVT DestVT = VA.getLocVT();
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
      llvm_unreachable("Unknown arg promotion!");

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      CLI.OutRegs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // FIXME: Handle custom args.
      assert(VA.isMemLoc() && "Assuming store on stack.");

      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))

      // Need to store on the stack.
      unsigned ArgSize = (ArgVT.getSizeInBits() + 7) / 8;

      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittleEndian())
        BEAlign = 8 - ArgSize;

      Addr.setKind(Address::RegBase);
      Addr.setReg(AArch64::SP);
      Addr.setOffset(VA.getLocMemOffset() + BEAlign);

      unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
      MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
          MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()),
          MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);

      if (!emitStore(ArgVT, ArgReg, Addr, MMO))
bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
                                 unsigned NumBytes) {
  CallingConv::ID CC = CLI.CallConv;

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(NumBytes).addImm(0);

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC));

    // Only handle a single return value.
    if (RVLocs.size() != 1)

    // Copy all of the result registers out of their specified physreg.
    MVT CopyVT = RVLocs[0].getValVT();

    // TODO: Handle big-endian results
    if (CopyVT.isVector() && !Subtarget->isLittleEndian())

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(RVLocs[0].getLocReg());
    CLI.InRegs.push_back(RVLocs[0].getLocReg());

    CLI.ResultReg = ResultReg;
    CLI.NumResultRegs = 1;
bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
  CallingConv::ID CC = CLI.CallConv;
  bool IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;
  const Value *Callee = CLI.Callee;
  MCSymbol *Symbol = CLI.Symbol;

  if (!Callee && !Symbol)

  // Allow SelectionDAG isel to handle tail calls.

  // FIXME: we could and should support this, but for now correctness at -O0 is
  if (Subtarget->isTargetILP32())

  CodeModel::Model CM = TM.getCodeModel();
  // Only support the small-addressing and large code models.
  if (CM != CodeModel::Large && !Subtarget->useSmallAddressing())

  // FIXME: Add large code model support for ELF.
  if (CM == CodeModel::Large && !Subtarget->isTargetMachO())

  // Let SDISel handle vararg functions.

  // FIXME: Only handle *simple* calls for now.
  if (CLI.RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(CLI.RetTy, RetVT))

  for (auto Flag : CLI.OutFlags)
    if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal() ||
        Flag.isSwiftSelf() || Flag.isSwiftError())

  // Set up the argument vectors.
  SmallVector<MVT, 16> OutVTs;
  OutVTs.reserve(CLI.OutVals.size());

  for (auto *Val : CLI.OutVals) {
    if (!isTypeLegal(Val->getType(), VT) &&
        !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))

    // We don't handle vector parameters yet.
    if (VT.isVector() || VT.getSizeInBits() > 64)

    OutVTs.push_back(VT);

  if (Callee && !computeCallAddress(Callee, Addr))

  // Handle the arguments now that we've gotten them.
  if (!processCallArgs(CLI, OutVTs, NumBytes))

  const AArch64RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  if (RegInfo->isAnyArgRegReserved(*MF))
    RegInfo->emitReservedArgRegCallError(*MF);

  MachineInstrBuilder MIB;
  if (Subtarget->useSmallAddressing()) {
    const MCInstrDesc &II = TII.get(Addr.getReg() ? AArch64::BLR : AArch64::BL);
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II);
      MIB.addSym(Symbol, 0);
    else if (Addr.getGlobalValue())
      MIB.addGlobalAddress(Addr.getGlobalValue(), 0, 0);
    else if (Addr.getReg()) {
      unsigned Reg = constrainOperandRegClass(II, Addr.getReg(), 0);
    unsigned CallReg = 0;
      unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
          .addSym(Symbol, AArch64II::MO_GOT | AArch64II::MO_PAGE);

      CallReg = createResultReg(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(AArch64::LDRXui), CallReg)
                 AArch64II::MO_GOT | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    } else if (Addr.getGlobalValue())
      CallReg = materializeGV(Addr.getGlobalValue());
    else if (Addr.getReg())
      CallReg = Addr.getReg();

    const MCInstrDesc &II = TII.get(AArch64::BLR);
    CallReg = constrainOperandRegClass(II, CallReg, 0);
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(CallReg);

  // Add implicit physical register uses to the call.
  for (auto Reg : CLI.OutRegs)
    MIB.addReg(Reg, RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  // Finish off the call including any return values.
  return finishCall(CLI, RetVT, NumBytes);
bool AArch64FastISel::isMemCpySmall(uint64_t Len, unsigned Alignment) {
  return Len / Alignment <= 4;
bool AArch64FastISel::tryEmitSmallMemCpy(Address Dest, Address Src,
                                         uint64_t Len, unsigned Alignment) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!isMemCpySmall(Len, Alignment))

  int64_t UnscaledOffset = 0;
  Address OrigDest = Dest;
  Address OrigSrc = Src;

    if (!Alignment || Alignment >= 8) {
      // Bound based on alignment.
      if (Len >= 4 && Alignment == 4)
      else if (Len >= 2 && Alignment == 2)

    unsigned ResultReg = emitLoad(VT, VT, Src);
    if (!emitStore(VT, ResultReg, Dest))

    int64_t Size = VT.getSizeInBits() / 8;
    UnscaledOffset += Size;

    // We need to recompute the unscaled offset for each iteration.
    Dest.setOffset(OrigDest.getOffset() + UnscaledOffset);
    Src.setOffset(OrigSrc.getOffset() + UnscaledOffset);
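// Illustrative sketch (assumed registers): an 8-byte-aligned 16-byte memcpy
// is unrolled into two load/store pairs at increasing offsets, roughly
//   ldr x8, [x1]        str x8, [x0]
//   ldr x8, [x1, #8]    str x8, [x0, #8]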
/// Check if it is possible to fold the condition from the XALU intrinsic
/// into the user. The condition code will only be updated on success.
bool AArch64FastISel::foldXALUIntrinsic(AArch64CC::CondCode &CC,
                                        const Instruction *I,
                                        const Value *Cond) {
  if (!isa<ExtractValueInst>(Cond))

  const auto *EV = cast<ExtractValueInst>(Cond);
  if (!isa<IntrinsicInst>(EV->getAggregateOperand()))

  const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
  const Function *Callee = II->getCalledFunction();
      cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
  if (!isTypeLegal(RetTy, RetVT))

  if (RetVT != MVT::i32 && RetVT != MVT::i64)

  const Value *LHS = II->getArgOperand(0);
  const Value *RHS = II->getArgOperand(1);

  // Canonicalize immediate to the RHS.
  if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
      isCommutativeIntrinsic(II))
    std::swap(LHS, RHS);

  // Simplify multiplies.
  Intrinsic::ID IID = II->getIntrinsicID();
  case Intrinsic::smul_with_overflow:
    if (const auto *C = dyn_cast<ConstantInt>(RHS))
      if (C->getValue() == 2)
        IID = Intrinsic::sadd_with_overflow;
  case Intrinsic::umul_with_overflow:
    if (const auto *C = dyn_cast<ConstantInt>(RHS))
      if (C->getValue() == 2)
        IID = Intrinsic::uadd_with_overflow;

  AArch64CC::CondCode TmpCC;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
    TmpCC = AArch64CC::VS;
  case Intrinsic::uadd_with_overflow:
    TmpCC = AArch64CC::HS;
  case Intrinsic::usub_with_overflow:
    TmpCC = AArch64CC::LO;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    TmpCC = AArch64CC::NE;

  // Check if both instructions are in the same basic block.
  if (!isValueAvailable(II))

  // Make sure nothing is in the way
  BasicBlock::const_iterator Start(I);
  BasicBlock::const_iterator End(II);
  for (auto Itr = std::prev(Start); Itr != End; --Itr) {
    // We only expect extractvalue instructions between the intrinsic and the
    // instruction to be selected.
    if (!isa<ExtractValueInst>(Itr))

    // Check that the extractvalue operand comes from the intrinsic.
    const auto *EVI = cast<ExtractValueInst>(Itr);
    if (EVI->getAggregateOperand() != II)
bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
  // FIXME: Handle more intrinsics.
  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
    MFI.setFrameAddressIsTaken(true);

    const AArch64RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
    Register FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
    Register SrcReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), SrcReg).addReg(FramePtr);
    // Recursively load frame address
    unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
      DestReg = fastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
                                SrcReg, /*IsKill=*/true, 0);
      assert(DestReg && "Unexpected LDR instruction emission failure.");

    updateValueMap(II, SrcReg);
  case Intrinsic::sponentry: {
    MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();

    // SP = FP + Fixed Object + 16
    int FI = MFI.CreateFixedObject(4, 0, false);
    unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::ADDXri), ResultReg)

    updateValueMap(II, ResultReg);
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const auto *MTI = cast<MemTransferInst>(II);
    // Don't handle volatile.
    if (MTI->isVolatile())

    // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool IsMemCpy = (II->getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI->getLength()) && IsMemCpy) {
      // Small memcpy's are common enough that we want to do them without a call
      uint64_t Len = cast<ConstantInt>(MTI->getLength())->getZExtValue();
      unsigned Alignment = MinAlign(MTI->getDestAlignment(),
                                    MTI->getSourceAlignment());
      if (isMemCpySmall(Len, Alignment)) {
        if (!computeAddress(MTI->getRawDest(), Dest) ||
            !computeAddress(MTI->getRawSource(), Src))
        if (tryEmitSmallMemCpy(Dest, Src, Len, Alignment))

    if (!MTI->getLength()->getType()->isIntegerTy(64))

    if (MTI->getSourceAddressSpace() > 255 || MTI->getDestAddressSpace() > 255)
      // Fast instruction selection doesn't support the special

    const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
    return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 1);
  case Intrinsic::memset: {
    const MemSetInst *MSI = cast<MemSetInst>(II);
    // Don't handle volatile.
    if (MSI->isVolatile())

    if (!MSI->getLength()->getType()->isIntegerTy(64))

    if (MSI->getDestAddressSpace() > 255)
      // Fast instruction selection doesn't support the special

    return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow: {
    if (!isTypeLegal(II->getType(), RetVT))

    if (RetVT != MVT::f32 && RetVT != MVT::f64)

    static const RTLIB::Libcall LibCallTable[3][2] = {
      { RTLIB::SIN_F32, RTLIB::SIN_F64 },
      { RTLIB::COS_F32, RTLIB::COS_F64 },
      { RTLIB::POW_F32, RTLIB::POW_F64 }

    bool Is64Bit = RetVT == MVT::f64;
    switch (II->getIntrinsicID()) {
      llvm_unreachable("Unexpected intrinsic.");
    case Intrinsic::sin:
      LC = LibCallTable[0][Is64Bit];
    case Intrinsic::cos:
      LC = LibCallTable[1][Is64Bit];
    case Intrinsic::pow:
      LC = LibCallTable[2][Is64Bit];

    Args.reserve(II->getNumArgOperands());

    // Populate the argument list.
    for (auto &Arg : II->arg_operands()) {
      Entry.Ty = Arg->getType();
      Args.push_back(Entry);

    CallLoweringInfo CLI;
    MCContext &Ctx = MF->getContext();
    CLI.setCallee(DL, Ctx, TLI.getLibcallCallingConv(LC), II->getType(),
                  TLI.getLibcallName(LC), std::move(Args));
    if (!lowerCallTo(CLI))
    updateValueMap(II, CLI.ResultReg);
  case Intrinsic::fabs: {
    if (!isTypeLegal(II->getType(), VT))

    switch (VT.SimpleTy) {
      Opc = AArch64::FABSSr;
      Opc = AArch64::FABSDr;
    unsigned SrcReg = getRegForValue(II->getOperand(0));
    bool SrcRegIsKill = hasTrivialKill(II->getOperand(0));
    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
        .addReg(SrcReg, getKillRegState(SrcRegIsKill));
    updateValueMap(II, ResultReg);
  case Intrinsic::trap:
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
  case Intrinsic::debugtrap: {
    if (Subtarget->isTargetWindows()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
  case Intrinsic::sqrt: {
    Type *RetTy = II->getCalledFunction()->getReturnType();

    if (!isTypeLegal(RetTy, VT))

    unsigned Op0Reg = getRegForValue(II->getOperand(0));
    bool Op0IsKill = hasTrivialKill(II->getOperand(0));

    unsigned ResultReg = fastEmit_r(VT, VT, ISD::FSQRT, Op0Reg, Op0IsKill);

    updateValueMap(II, ResultReg);
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: {
    // This implements the basic lowering of the xalu with overflow intrinsics.
    const Function *Callee = II->getCalledFunction();
    auto *Ty = cast<StructType>(Callee->getReturnType());
    Type *RetTy = Ty->getTypeAtIndex(0U);

    if (!isTypeLegal(RetTy, VT))

    if (VT != MVT::i32 && VT != MVT::i64)

    const Value *LHS = II->getArgOperand(0);
    const Value *RHS = II->getArgOperand(1);
    // Canonicalize immediate to the RHS.
    if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
        isCommutativeIntrinsic(II))
      std::swap(LHS, RHS);

    // Simplify multiplies.
    Intrinsic::ID IID = II->getIntrinsicID();
    case Intrinsic::smul_with_overflow:
      if (const auto *C = dyn_cast<ConstantInt>(RHS))
        if (C->getValue() == 2) {
          IID = Intrinsic::sadd_with_overflow;
    case Intrinsic::umul_with_overflow:
      if (const auto *C = dyn_cast<ConstantInt>(RHS))
        if (C->getValue() == 2) {
          IID = Intrinsic::uadd_with_overflow;

    unsigned ResultReg1 = 0, ResultReg2 = 0, MulReg = 0;
    AArch64CC::CondCode CC = AArch64CC::Invalid;
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::sadd_with_overflow:
      ResultReg1 = emitAdd(VT, LHS, RHS, /*SetFlags=*/true);
    case Intrinsic::uadd_with_overflow:
      ResultReg1 = emitAdd(VT, LHS, RHS, /*SetFlags=*/true);
    case Intrinsic::ssub_with_overflow:
      ResultReg1 = emitSub(VT, LHS, RHS, /*SetFlags=*/true);
    case Intrinsic::usub_with_overflow:
      ResultReg1 = emitSub(VT, LHS, RHS, /*SetFlags=*/true);
    case Intrinsic::smul_with_overflow: {
      unsigned LHSReg = getRegForValue(LHS);
      bool LHSIsKill = hasTrivialKill(LHS);

      unsigned RHSReg = getRegForValue(RHS);
      bool RHSIsKill = hasTrivialKill(RHS);

      if (VT == MVT::i32) {
        MulReg = emitSMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
        unsigned ShiftReg = emitLSR_ri(MVT::i64, MVT::i64, MulReg,
                                       /*IsKill=*/false, 32);
        MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
        ShiftReg = fastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
        emitSubs_rs(VT, ShiftReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
                    AArch64_AM::ASR, 31, /*WantResult=*/false);
        assert(VT == MVT::i64 && "Unexpected value type.");
        // LHSReg and RHSReg cannot be killed by this Mul, since they are
        // reused in the next instruction.
        MulReg = emitMul_rr(VT, LHSReg, /*IsKill=*/false, RHSReg,
        unsigned SMULHReg = fastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
        emitSubs_rs(VT, SMULHReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
                    AArch64_AM::ASR, 63, /*WantResult=*/false);
    case Intrinsic::umul_with_overflow: {
      unsigned LHSReg = getRegForValue(LHS);
      bool LHSIsKill = hasTrivialKill(LHS);

      unsigned RHSReg = getRegForValue(RHS);
      bool RHSIsKill = hasTrivialKill(RHS);

      if (VT == MVT::i32) {
        MulReg = emitUMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
        emitSubs_rs(MVT::i64, AArch64::XZR, /*IsKill=*/true, MulReg,
                    /*IsKill=*/false, AArch64_AM::LSR, 32,
                    /*WantResult=*/false);
        MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
        assert(VT == MVT::i64 && "Unexpected value type.");
        // LHSReg and RHSReg cannot be killed by this Mul, since they are
        // reused in the next instruction.
        MulReg = emitMul_rr(VT, LHSReg, /*IsKill=*/false, RHSReg,
        unsigned UMULHReg = fastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
        emitSubs_rr(VT, AArch64::XZR, /*IsKill=*/true, UMULHReg,
                    /*IsKill=*/false, /*WantResult=*/false);

      ResultReg1 = createResultReg(TLI.getRegClassFor(VT));
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg1).addReg(MulReg);

    ResultReg2 = fastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
                                  AArch64::WZR, /*IsKill=*/true, AArch64::WZR,
                                  /*IsKill=*/true, getInvertedCondCode(CC));
    assert((ResultReg1 + 1) == ResultReg2 &&
           "Nonconsecutive result registers.");
    updateValueMap(II, ResultReg1, 2);
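// Illustrative sketch (assumed registers) of the i32 smul.with.overflow path
// above:
//   smull x8, w0, w1
//   lsr   x9, x8, #32
//   cmp   w9, w8, asr #31        ; overflow iff high half != sign of low half
//   csinc w10, wzr, wzr, eq      ; overflow bit via the inverted condition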
bool AArch64FastISel::selectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  // FIXME: in principle it could. Mostly just a case of zero extending outgoing
  // pointers to i64.
  if (Subtarget->isTargetILP32())
    return false;

  if (F.isVarArg())
    return false;

  if (TLI.supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return false;

  if (TLI.supportSplitCSR(FuncInfo.MF))
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  if (Ret->getNumOperands() > 0) {
    CallingConv::ID CC = F.getCallingConv();
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
    CCAssignFn *RetCC = CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
                                                     : RetCC_AArch64_AAPCS;
    CCInfo.AnalyzeReturn(Outs, RetCC);

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];
    const Value *RV = Ret->getOperand(0);

    // Don't bother handling odd stuff for now.
    if ((VA.getLocInfo() != CCValAssign::Full) &&
        (VA.getLocInfo() != CCValAssign::BCvt))
      return false;

    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    Register DestReg = VA.getLocReg();
    // Avoid a cross-class copy. This is very unlikely.
    if (!MRI.getRegClass(SrcReg)->contains(DestReg))
      return false;

    EVT RVEVT = TLI.getValueType(DL, RV->getType());
    if (!RVEVT.isSimple())
      return false;

    // Vectors (of > 1 lane) in big endian need tricky handling.
    if (RVEVT.isVector() && RVEVT.getVectorNumElements() > 1 &&
        !Subtarget->isLittleEndian())
      return false;

    MVT RVVT = RVEVT.getSimpleVT();
    if (RVVT == MVT::f128)
      return false;

    MVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      bool IsZExt = Outs[0].Flags.isZExt();
      SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
      if (SrcReg == 0)
        return false;
    }

    // Make the copy.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(AArch64::RET_ReallyLR));
  for (unsigned RetReg : RetRegs)
    MIB.addReg(RetReg, RegState::Implicit);
  return true;
}

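/// Lower a truncate to i1/i8/i16/i32. Truncating from i64 to a non-legal type
/// extracts the low 32 bits and masks them (e.g. trunc i64 -> i8 becomes,
/// roughly, an EXTRACT_SUBREG of sub_32 followed by an AND with 0xff); in all
/// other cases the high bits are simply left undefined and a plain COPY is
/// emitted.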
bool AArch64FastISel::selectTrunc(const Instruction *I) {
  Type *DestTy = I->getType();
  Value *Op = I->getOperand(0);
  Type *SrcTy = Op->getType();

  EVT SrcEVT = TLI.getValueType(DL, SrcTy, true);
  EVT DestEVT = TLI.getValueType(DL, DestTy, true);
  if (!SrcEVT.isSimple())
    return false;
  if (!DestEVT.isSimple())
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();

  if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 &&
      SrcVT != MVT::i8)
    return false;

  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8 &&
      DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg)
    return false;
  bool SrcIsKill = hasTrivialKill(Op);

  // If we're truncating from i64 to a smaller non-legal type then generate an
  // AND. Otherwise, we know the high bits are undefined and a truncate only
  // generate a COPY. We cannot mark the source register also as result
  // register, because this can incorrectly transfer the kill flag onto the
  // source register.
  unsigned ResultReg;
  if (SrcVT == MVT::i64) {
    uint64_t Mask = 0;
    switch (DestVT.SimpleTy) {
    default:
      // Trunc i64 to i32 is handled by the target-independent fast-isel.
      return false;
    case MVT::i1:
      Mask = 0x1;
      break;
    case MVT::i8:
      Mask = 0xff;
      break;
    case MVT::i16:
      Mask = 0xffff;
      break;
    }
    // Issue an extract_subreg to get the lower 32-bits.
    unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
                                                AArch64::sub_32);
    // Create the AND instruction which performs the actual truncation.
    ResultReg = emitAnd_ri(MVT::i32, Reg32, /*IsKill=*/true, Mask);
    assert(ResultReg && "Unexpected AND instruction emission failure.");
  } else {
    ResultReg = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(SrcReg, getKillRegState(SrcIsKill));
  }

  updateValueMap(I, ResultReg);
  return true;
}

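/// Extend an i1 value. Zero-extension is an AND with 1 (plus a SUBREG_TO_REG
/// when the destination is i64); sign-extension to i8/i16/i32 uses SBFM, and
/// the i1 -> i64 sign-extend case is not handled here.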
unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) {
  assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 ||
          DestVT == MVT::i64) &&
         "Unexpected value type.");
  // Handle i8 and i16 as i32.
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
    DestVT = MVT::i32;

  if (IsZExt) {
    unsigned ResultReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1);
    assert(ResultReg && "Unexpected AND instruction emission failure.");
    if (DestVT == MVT::i64) {
      // We're ZExt i1 to i64. The ANDWri Wd, Ws, #1 implicitly clears the
      // upper 32 bits. Emit a SUBREG_TO_REG to extend from Wd to Xd.
      Register Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(AArch64::SUBREG_TO_REG), Reg64)
          .addImm(0)
          .addReg(ResultReg)
          .addImm(AArch64::sub_32);
      ResultReg = Reg64;
    }
    return ResultReg;
  } else {
    if (DestVT == MVT::i64) {
      // FIXME: We're SExt i1 to i64.
      return 0;
    }
    return fastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
                            /*TODO:IsKill=*/false, 0, 0);
  }
}

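// AArch64's MUL is an alias of MADD with the zero register as the addend, so
// a plain multiply is emitted here as MADDWrrr/MADDXrrr with WZR/XZR as the
// third source operand.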
unsigned AArch64FastISel::emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill) {
  unsigned Opc, ZReg;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    RetVT = MVT::i32;
    Opc = AArch64::MADDWrrr; ZReg = AArch64::WZR; break;
  case MVT::i64:
    Opc = AArch64::MADDXrrr; ZReg = AArch64::XZR; break;
  }

  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  return fastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill,
                          ZReg, /*IsKill=*/true);
}

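// The widening multiplies below produce a 64-bit result from 32-bit operands
// by using the SMADDL/UMADDL multiply-accumulate forms with XZR as the addend.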
unsigned AArch64FastISel::emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill) {
  if (RetVT != MVT::i64)
    return 0;

  return fastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
                          Op0, Op0IsKill, Op1, Op1IsKill,
                          AArch64::XZR, /*IsKill=*/true);
}

unsigned AArch64FastISel::emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill) {
  if (RetVT != MVT::i64)
    return 0;

  return fastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
                          Op0, Op0IsKill, Op1, Op1IsKill,
                          AArch64::XZR, /*IsKill=*/true);
}

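// Variable-amount shifts. i8/i16 are not legal types, so the shift amount is
// masked to the type width and the result is re-truncated; emitLSR_rr and
// emitASR_rr below additionally zero- or sign-extend the shifted operand
// first, because the hardware shift operates on the full 32-bit register.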
unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
                                     unsigned Op1Reg, bool Op1IsKill) {
  unsigned Opc = 0;
  bool NeedTrunc = false;
  uint64_t Mask = 0;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:  Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::LSLVWr;                                  break;
  case MVT::i64: Opc = AArch64::LSLVXr;                                  break;
  }

  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (NeedTrunc) {
    Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
    Op1IsKill = true;
  }
  unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
                                       Op1IsKill);
  if (NeedTrunc)
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
  return ResultReg;
}

unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                     bool Op0IsKill, uint64_t Shift,
                                     bool IsZExt) {
  assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) && "Unexpected return value type.");

  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
  unsigned DstBits = RetVT.getSizeInBits();
  unsigned SrcBits = SrcVT.getSizeInBits();
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

  // Just emit a copy for "zero" shifts.
  if (Shift == 0) {
    if (RetVT == SrcVT) {
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(Op0, getKillRegState(Op0IsKill));
      return ResultReg;
    } else
      return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  }

  // Don't deal with undefined shifts.
  if (Shift >= DstBits)
    return 0;

  // For immediate shifts we can fold the zero-/sign-extension into the shift.
  // {S|U}BFM Wd, Wn, #r, #s
  // Wd<32+s-r,32-r> = Wn<s:0> when r > s
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = shl i16 %1, 4
  // Wd<32+7-28,32-28> = Wn<7:0> <- clamp s to 7
  // 0b1111_1111_1111_1111__1111_1010_1010_0000 sext
  // 0b0000_0000_0000_0000__0000_0101_0101_0000 sext | zext
  // 0b0000_0000_0000_0000__0000_1010_1010_0000 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = shl i16 %1, 8
  // Wd<32+7-24,32-24> = Wn<7:0>
  // 0b1111_1111_1111_1111__1010_1010_0000_0000 sext
  // 0b0000_0000_0000_0000__0101_0101_0000_0000 sext | zext
  // 0b0000_0000_0000_0000__1010_1010_0000_0000 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = shl i16 %1, 12
  // Wd<32+3-20,32-20> = Wn<3:0>
  // 0b1111_1111_1111_1111__1010_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0101_0000_0000_0000 sext | zext
  // 0b0000_0000_0000_0000__1010_0000_0000_0000 zext

  unsigned ImmR = RegSize - Shift;
  // Limit the width to the length of the source type.
  unsigned ImmS = std::min<unsigned>(SrcBits - 1, DstBits - 1 - Shift);
  static const unsigned OpcTable[2][2] = {
    {AArch64::SBFMWri, AArch64::SBFMXri},
    {AArch64::UBFMWri, AArch64::UBFMXri}
  };
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    Register TmpReg = MRI.createVirtualRegister(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(0)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(AArch64::sub_32);
    Op0 = TmpReg;
    Op0IsKill = true;
  }
  return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}

unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
                                     unsigned Op1Reg, bool Op1IsKill) {
  unsigned Opc = 0;
  bool NeedTrunc = false;
  uint64_t Mask = 0;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:  Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::LSRVWr; break;
  case MVT::i64: Opc = AArch64::LSRVXr; break;
  }

  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (NeedTrunc) {
    Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Op0IsKill, Mask);
    Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
    Op0IsKill = Op1IsKill = true;
  }
  unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
                                       Op1IsKill);
  if (NeedTrunc)
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
  return ResultReg;
}

unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                     bool Op0IsKill, uint64_t Shift,
                                     bool IsZExt) {
  assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) && "Unexpected return value type.");

  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
  unsigned DstBits = RetVT.getSizeInBits();
  unsigned SrcBits = SrcVT.getSizeInBits();
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

  // Just emit a copy for "zero" shifts.
  if (Shift == 0) {
    if (RetVT == SrcVT) {
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(Op0, getKillRegState(Op0IsKill));
      return ResultReg;
    } else
      return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  }

  // Don't deal with undefined shifts.
  if (Shift >= DstBits)
    return 0;

  // For immediate shifts we can fold the zero-/sign-extension into the shift.
  // {S|U}BFM Wd, Wn, #r, #s
  // Wd<s-r:0> = Wn<s:r> when r <= s
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = lshr i16 %1, 4
  // Wd<7-4:0> = Wn<7:4>
  // 0b0000_0000_0000_0000__0000_1111_1111_1010 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0101 sext | zext
  // 0b0000_0000_0000_0000__0000_0000_0000_1010 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = lshr i16 %1, 8
  // Wd<7-7,0> = Wn<7:7>
  // 0b0000_0000_0000_0000__0000_0000_1111_1111 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = lshr i16 %1, 12
  // Wd<7-7,0> = Wn<7:7> <- clamp r to 7
  // 0b0000_0000_0000_0000__0000_0000_0000_1111 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext

  if (Shift >= SrcBits && IsZExt)
    return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);

  // It is not possible to fold a sign-extend into the LShr instruction. In this
  // case emit a sign-extend.
  if (!IsZExt) {
    Op0 = emitIntExt(SrcVT, Op0, RetVT, IsZExt);
    if (!Op0)
      return 0;
    Op0IsKill = true;
    SrcVT = RetVT;
    SrcBits = SrcVT.getSizeInBits();
    IsZExt = true;
  }

  unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
  unsigned ImmS = SrcBits - 1;
  static const unsigned OpcTable[2][2] = {
    {AArch64::SBFMWri, AArch64::SBFMXri},
    {AArch64::UBFMWri, AArch64::UBFMXri}
  };
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    Register TmpReg = MRI.createVirtualRegister(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(0)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(AArch64::sub_32);
    Op0 = TmpReg;
    Op0IsKill = true;
  }
  return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}

unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
                                     unsigned Op1Reg, bool Op1IsKill) {
  unsigned Opc = 0;
  bool NeedTrunc = false;
  uint64_t Mask = 0;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:  Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::ASRVWr;                                  break;
  case MVT::i64: Opc = AArch64::ASRVXr;                                  break;
  }

  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (NeedTrunc) {
    Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, /*isZExt=*/false);
    Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
    Op0IsKill = Op1IsKill = true;
  }
  unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
                                       Op1IsKill);
  if (NeedTrunc)
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
  return ResultReg;
}

unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                     bool Op0IsKill, uint64_t Shift,
                                     bool IsZExt) {
  assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) && "Unexpected return value type.");

  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
  unsigned DstBits = RetVT.getSizeInBits();
  unsigned SrcBits = SrcVT.getSizeInBits();
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

  // Just emit a copy for "zero" shifts.
  if (Shift == 0) {
    if (RetVT == SrcVT) {
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(Op0, getKillRegState(Op0IsKill));
      return ResultReg;
    } else
      return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  }

  // Don't deal with undefined shifts.
  if (Shift >= DstBits)
    return 0;

  // For immediate shifts we can fold the zero-/sign-extension into the shift.
  // {S|U}BFM Wd, Wn, #r, #s
  // Wd<s-r:0> = Wn<s:r> when r <= s
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = ashr i16 %1, 4
  // Wd<7-4:0> = Wn<7:4>
  // 0b1111_1111_1111_1111__1111_1111_1111_1010 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0101 sext | zext
  // 0b0000_0000_0000_0000__0000_0000_0000_1010 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = ashr i16 %1, 8
  // Wd<7-7,0> = Wn<7:7>
  // 0b1111_1111_1111_1111__1111_1111_1111_1111 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = ashr i16 %1, 12
  // Wd<7-7,0> = Wn<7:7> <- clamp r to 7
  // 0b1111_1111_1111_1111__1111_1111_1111_1111 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext

  if (Shift >= SrcBits && IsZExt)
    return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);

  unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
  unsigned ImmS = SrcBits - 1;
  static const unsigned OpcTable[2][2] = {
    {AArch64::SBFMWri, AArch64::SBFMXri},
    {AArch64::UBFMWri, AArch64::UBFMXri}
  };
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    Register TmpReg = MRI.createVirtualRegister(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(0)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(AArch64::sub_32);
    Op0 = TmpReg;
    Op0IsKill = true;
  }
  return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}

unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                     bool IsZExt) {
  assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?");

  // FastISel does not have plumbing to deal with extensions where the SrcVT or
  // DestVT are odd things, so test to make sure that they are both types we can
  // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
  // bail out to SelectionDAG.
  if (((DestVT != MVT::i8) && (DestVT != MVT::i16) &&
       (DestVT != MVT::i32) && (DestVT != MVT::i64)) ||
      ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) &&
       (SrcVT != MVT::i16) && (SrcVT != MVT::i32)))
    return 0;

  unsigned Opc;
  unsigned Imm = 0;

  switch (SrcVT.SimpleTy) {
  default:
    return 0;
  case MVT::i1:
    return emiti1Ext(SrcReg, DestVT, IsZExt);
  case MVT::i8:
    if (DestVT == MVT::i64)
      Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    else
      Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    Imm = 7;
    break;
  case MVT::i16:
    if (DestVT == MVT::i64)
      Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    else
      Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    Imm = 15;
    break;
  case MVT::i32:
    assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?");
    Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    Imm = 31;
    break;
  }

  // Handle i8 and i16 as i32.
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
    DestVT = MVT::i32;
  else if (DestVT == MVT::i64) {
    Register Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), Src64)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(AArch64::sub_32);
    SrcReg = Src64;
  }

  const TargetRegisterClass *RC =
      (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  return fastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm);
}

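// The following two predicates classify already-selected load machine
// instructions by whether they zero-extend or sign-extend their result. They
// are used by optimizeIntExtLoad to fold an IR-level extension into a
// preceding load.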
static bool isZExtLoad(const MachineInstr *LI) {
  switch (LI->getOpcode()) {
  default:
    return false;
  case AArch64::LDURBBi:
  case AArch64::LDURHHi:
  case AArch64::LDURWi:
  case AArch64::LDRBBui:
  case AArch64::LDRHHui:
  case AArch64::LDRWui:
  case AArch64::LDRBBroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRWroX:
  case AArch64::LDRBBroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRWroW:
    return true;
  }
}

static bool isSExtLoad(const MachineInstr *LI) {
  switch (LI->getOpcode()) {
  default:
    return false;
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSWi:
  case AArch64::LDRSBWui:
  case AArch64::LDRSHWui:
  case AArch64::LDRSBXui:
  case AArch64::LDRSHXui:
  case AArch64::LDRSWui:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
    return true;
  }
}

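/// Try to fold a sign-/zero-extend into a preceding, already selected load. If
/// the load has the required extending form, the extend needs no extra code:
/// the value map is simply updated, with only a SUBREG_TO_REG emitted for a
/// zero-extend to i64 and the redundant sub_32 COPY removed for a sign-extend.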
bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
                                         MVT SrcVT) {
  const auto *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI || !LI->hasOneUse())
    return false;

  // Check if the load instruction has already been selected.
  unsigned Reg = lookUpRegForValue(LI);
  if (!Reg)
    return false;

  MachineInstr *MI = MRI.getUniqueVRegDef(Reg);
  if (!MI)
    return false;

  // Check if the correct load instruction has been emitted - SelectionDAG might
  // have emitted a zero-extending load, but we need a sign-extending load.
  bool IsZExt = isa<ZExtInst>(I);
  const auto *LoadMI = MI;
  if (LoadMI->getOpcode() == TargetOpcode::COPY &&
      LoadMI->getOperand(1).getSubReg() == AArch64::sub_32) {
    Register LoadReg = MI->getOperand(1).getReg();
    LoadMI = MRI.getUniqueVRegDef(LoadReg);
    assert(LoadMI && "Expected valid instruction");
  }
  if (!(IsZExt && isZExtLoad(LoadMI)) && !(!IsZExt && isSExtLoad(LoadMI)))
    return false;

  // Nothing to be done.
  if (RetVT != MVT::i64 || SrcVT > MVT::i32) {
    updateValueMap(I, Reg);
    return true;
  }

  if (IsZExt) {
    unsigned Reg64 = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(AArch64::SUBREG_TO_REG), Reg64)
        .addImm(0)
        .addReg(Reg, getKillRegState(true))
        .addImm(AArch64::sub_32);
    Reg = Reg64;
  } else {
    assert((MI->getOpcode() == TargetOpcode::COPY &&
            MI->getOperand(1).getSubReg() == AArch64::sub_32) &&
           "Expected copy instruction");
    Reg = MI->getOperand(1).getReg();
    MachineBasicBlock::iterator I(MI);
    removeDeadCode(I, std::next(I));
  }
  updateValueMap(I, Reg);
  return true;
}

bool AArch64FastISel::selectIntExt(const Instruction *I) {
  assert((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
         "Unexpected integer extend instruction.");
  MVT RetVT;
  MVT SrcVT;
  if (!isTypeSupported(I->getType(), RetVT))
    return false;

  if (!isTypeSupported(I->getOperand(0)->getType(), SrcVT))
    return false;

  // Try to optimize already sign-/zero-extended values from load instructions.
  if (optimizeIntExtLoad(I, RetVT, SrcVT))
    return true;

  unsigned SrcReg = getRegForValue(I->getOperand(0));
  if (!SrcReg)
    return false;
  bool SrcIsKill = hasTrivialKill(I->getOperand(0));

  // Try to optimize already sign-/zero-extended values from function arguments.
  bool IsZExt = isa<ZExtInst>(I);
  if (const auto *Arg = dyn_cast<Argument>(I->getOperand(0))) {
    if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr())) {
      if (RetVT == MVT::i64 && SrcVT != MVT::i64) {
        unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(AArch64::SUBREG_TO_REG), ResultReg)
            .addImm(0)
            .addReg(SrcReg, getKillRegState(SrcIsKill))
            .addImm(AArch64::sub_32);
        SrcReg = ResultReg;
      }
      // Conservatively clear all kill flags from all uses, because we are
      // replacing a sign-/zero-extend instruction at IR level with a nop at MI
      // level. The result of the instruction at IR level might have been
      // trivially dead, which is no longer true at MI level.
      unsigned UseReg = lookUpRegForValue(I);
      if (UseReg)
        MRI.clearKillFlags(UseReg);

      updateValueMap(I, SrcReg);
      return true;
    }
  }

  unsigned ResultReg = emitIntExt(SrcVT, SrcReg, RetVT, IsZExt);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

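/// Lower srem/urem. There is no remainder instruction on AArch64, so the
/// remainder is computed as Src0 - (Src0 / Src1) * Src1 using an SDIV/UDIV
/// followed by an MSUB.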
bool AArch64FastISel::selectRem(const Instruction *I, unsigned ISDOpcode) {
  EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
  if (!DestEVT.isSimple())
    return false;

  MVT DestVT = DestEVT.getSimpleVT();
  if (DestVT != MVT::i64 && DestVT != MVT::i32)
    return false;

  unsigned DivOpc;
  bool Is64bit = (DestVT == MVT::i64);
  switch (ISDOpcode) {
  default:
    return false;
  case ISD::SREM:
    DivOpc = Is64bit ? AArch64::SDIVXr : AArch64::SDIVWr;
    break;
  case ISD::UREM:
    DivOpc = Is64bit ? AArch64::UDIVXr : AArch64::UDIVWr;
    break;
  }
  unsigned MSubOpc = Is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
  unsigned Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;
  bool Src0IsKill = hasTrivialKill(I->getOperand(0));

  unsigned Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src1Reg)
    return false;
  bool Src1IsKill = hasTrivialKill(I->getOperand(1));

  const TargetRegisterClass *RC =
      (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  unsigned QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false,
                                     Src1Reg, /*IsKill=*/false);
  assert(QuotReg && "Unexpected DIV instruction emission failure.");
  // The remainder is computed as numerator - (quotient * denominator) using the
  // MSUB instruction.
  unsigned ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
                                        Src1Reg, Src1IsKill, Src0Reg,
                                        Src0IsKill);
  updateValueMap(I, ResultReg);
  return true;
}

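/// Lower an integer multiply. Multiplies by a power of two are turned into a
/// left shift, folding a free zero-/sign-extension of the operand into the
/// shift when possible; e.g. a multiply by 8 becomes, roughly, an LSL #3
/// encoded as a UBFM/SBFM. Everything else goes through emitMul_rr.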
bool AArch64FastISel::selectMul(const Instruction *I) {
  MVT VT;
  if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
    return false;

  if (VT.isVector())
    return selectBinaryOp(I, ISD::MUL);

  const Value *Src0 = I->getOperand(0);
  const Value *Src1 = I->getOperand(1);
  if (const auto *C = dyn_cast<ConstantInt>(Src0))
    if (C->getValue().isPowerOf2())
      std::swap(Src0, Src1);

  // Try to simplify to a shift instruction.
  if (const auto *C = dyn_cast<ConstantInt>(Src1))
    if (C->getValue().isPowerOf2()) {
      uint64_t ShiftVal = C->getValue().logBase2();
      MVT SrcVT = VT;
      bool IsZExt = true;
      if (const auto *ZExt = dyn_cast<ZExtInst>(Src0)) {
        if (!isIntExtFree(ZExt)) {
          MVT VT;
          if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), VT)) {
            SrcVT = VT;
            IsZExt = true;
            Src0 = ZExt->getOperand(0);
          }
        }
      } else if (const auto *SExt = dyn_cast<SExtInst>(Src0)) {
        if (!isIntExtFree(SExt)) {
          MVT VT;
          if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), VT)) {
            SrcVT = VT;
            IsZExt = false;
            Src0 = SExt->getOperand(0);
          }
        }
      }

      unsigned Src0Reg = getRegForValue(Src0);
      if (!Src0Reg)
        return false;
      bool Src0IsKill = hasTrivialKill(Src0);

      unsigned ResultReg =
          emitLSL_ri(VT, SrcVT, Src0Reg, Src0IsKill, ShiftVal, IsZExt);

      if (ResultReg) {
        updateValueMap(I, ResultReg);
        return true;
      }
    }

  unsigned Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;
  bool Src0IsKill = hasTrivialKill(I->getOperand(0));

  unsigned Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src1Reg)
    return false;
  bool Src1IsKill = hasTrivialKill(I->getOperand(1));

  unsigned ResultReg = emitMul_rr(VT, Src0Reg, Src0IsKill, Src1Reg, Src1IsKill);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::selectShift(const Instruction *I) {
  MVT RetVT;
  if (!isTypeSupported(I->getType(), RetVT, /*IsVectorAllowed=*/true))
    return false;

  if (RetVT.isVector())
    return selectOperator(I, I->getOpcode());

  if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = 0;
    uint64_t ShiftVal = C->getZExtValue();
    MVT SrcVT = RetVT;
    bool IsZExt = I->getOpcode() != Instruction::AShr;
    const Value *Op0 = I->getOperand(0);
    if (const auto *ZExt = dyn_cast<ZExtInst>(Op0)) {
      if (!isIntExtFree(ZExt)) {
        MVT TmpVT;
        if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), TmpVT)) {
          SrcVT = TmpVT;
          IsZExt = true;
          Op0 = ZExt->getOperand(0);
        }
      }
    } else if (const auto *SExt = dyn_cast<SExtInst>(Op0)) {
      if (!isIntExtFree(SExt)) {
        MVT TmpVT;
        if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), TmpVT)) {
          SrcVT = TmpVT;
          IsZExt = false;
          Op0 = SExt->getOperand(0);
        }
      }
    }

    unsigned Op0Reg = getRegForValue(Op0);
    if (!Op0Reg)
      return false;
    bool Op0IsKill = hasTrivialKill(Op0);

    switch (I->getOpcode()) {
    default: llvm_unreachable("Unexpected instruction.");
    case Instruction::Shl:
      ResultReg = emitLSL_ri(RetVT, SrcVT, Op0Reg, Op0IsKill, ShiftVal, IsZExt);
      break;
    case Instruction::AShr:
      ResultReg = emitASR_ri(RetVT, SrcVT, Op0Reg, Op0IsKill, ShiftVal, IsZExt);
      break;
    case Instruction::LShr:
      ResultReg = emitLSR_ri(RetVT, SrcVT, Op0Reg, Op0IsKill, ShiftVal, IsZExt);
      break;
    }
    if (!ResultReg)
      return false;

    updateValueMap(I, ResultReg);
    return true;
  }

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (!Op0Reg)
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (!Op1Reg)
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  unsigned ResultReg = 0;
  switch (I->getOpcode()) {
  default: llvm_unreachable("Unexpected instruction.");
  case Instruction::Shl:
    ResultReg = emitLSL_rr(RetVT, Op0Reg, Op0IsKill, Op1Reg, Op1IsKill);
    break;
  case Instruction::AShr:
    ResultReg = emitASR_rr(RetVT, Op0Reg, Op0IsKill, Op1Reg, Op1IsKill);
    break;
  case Instruction::LShr:
    ResultReg = emitLSR_rr(RetVT, Op0Reg, Op0IsKill, Op1Reg, Op1IsKill);
    break;
  }

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::selectBitCast(const Instruction *I) {
  MVT RetVT;
  MVT SrcVT;
  if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT))
    return false;
  if (!isTypeLegal(I->getType(), RetVT))
    return false;

  unsigned Opc;
  if (RetVT == MVT::f32 && SrcVT == MVT::i32)
    Opc = AArch64::FMOVWSr;
  else if (RetVT == MVT::f64 && SrcVT == MVT::i64)
    Opc = AArch64::FMOVXDr;
  else if (RetVT == MVT::i32 && SrcVT == MVT::f32)
    Opc = AArch64::FMOVSWr;
  else if (RetVT == MVT::i64 && SrcVT == MVT::f64)
    Opc = AArch64::FMOVDXr;
  else
    return false;

  const TargetRegisterClass *RC = nullptr;
  switch (RetVT.SimpleTy) {
  default: llvm_unreachable("Unexpected value type.");
  case MVT::i32: RC = &AArch64::GPR32RegClass; break;
  case MVT::i64: RC = &AArch64::GPR64RegClass; break;
  case MVT::f32: RC = &AArch64::FPR32RegClass; break;
  case MVT::f64: RC = &AArch64::FPR64RegClass; break;
  }
  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (!Op0Reg)
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
  unsigned ResultReg = fastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

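/// Lower frem. There is no FP remainder instruction, so the operation is
/// emitted as a runtime library call (the REM_F32/REM_F64 libcalls, i.e.
/// fmodf/fmod) through the generic call lowering.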
bool AArch64FastISel::selectFRem(const Instruction *I) {
  MVT RetVT;
  if (!isTypeLegal(I->getType(), RetVT))
    return false;

  RTLIB::Libcall LC;
  switch (RetVT.SimpleTy) {
  default:
    return false;
  case MVT::f32:
    LC = RTLIB::REM_F32;
    break;
  case MVT::f64:
    LC = RTLIB::REM_F64;
    break;
  }

  ArgListTy Args;
  Args.reserve(I->getNumOperands());

  // Populate the argument list.
  for (auto &Arg : I->operands()) {
    ArgListEntry Entry;
    Entry.Val = Arg;
    Entry.Ty = Arg->getType();
    Args.push_back(Entry);
  }

  CallLoweringInfo CLI;
  MCContext &Ctx = MF->getContext();
  CLI.setCallee(DL, Ctx, TLI.getLibcallCallingConv(LC), I->getType(),
                TLI.getLibcallName(LC), std::move(Args));
  if (!lowerCallTo(CLI))
    return false;
  updateValueMap(I, CLI.ResultReg);
  return true;
}

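// Signed division by a power of two (or its negation) is lowered without a
// divide: add Pow2-1 for negative dividends (selected with a compare and
// CSEL), shift right arithmetically by log2 of the divisor, and negate the
// result if the divisor was negative. Exact divisions only need the shift.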
bool AArch64FastISel::selectSDiv(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  if (!isa<ConstantInt>(I->getOperand(1)))
    return selectBinaryOp(I, ISD::SDIV);

  const APInt &C = cast<ConstantInt>(I->getOperand(1))->getValue();
  if ((VT != MVT::i32 && VT != MVT::i64) || !C ||
      !(C.isPowerOf2() || (-C).isPowerOf2()))
    return selectBinaryOp(I, ISD::SDIV);

  unsigned Lg2 = C.countTrailingZeros();
  unsigned Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;
  bool Src0IsKill = hasTrivialKill(I->getOperand(0));

  if (cast<BinaryOperator>(I)->isExact()) {
    unsigned ResultReg = emitASR_ri(VT, VT, Src0Reg, Src0IsKill, Lg2);
    if (!ResultReg)
      return false;
    updateValueMap(I, ResultReg);
    return true;
  }

  int64_t Pow2MinusOne = (1ULL << Lg2) - 1;
  unsigned AddReg = emitAdd_ri_(VT, Src0Reg, /*IsKill=*/false, Pow2MinusOne);
  if (!AddReg)
    return false;

  // (Src0 < 0) ? Pow2 - 1 : 0;
  if (!emitICmp_ri(VT, Src0Reg, /*IsKill=*/false, 0))
    return false;

  unsigned SelectOpc;
  const TargetRegisterClass *RC;
  if (VT == MVT::i64) {
    SelectOpc = AArch64::CSELXr;
    RC = &AArch64::GPR64RegClass;
  } else {
    SelectOpc = AArch64::CSELWr;
    RC = &AArch64::GPR32RegClass;
  }
  unsigned SelectReg =
      fastEmitInst_rri(SelectOpc, RC, AddReg, /*IsKill=*/true, Src0Reg,
                       Src0IsKill, AArch64CC::LT);
  if (!SelectReg)
    return false;

  // Divide by Pow2 --> ashr. If we're dividing by a negative value we must also
  // negate the result.
  unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
  unsigned ResultReg;
  if (C.isNegative())
    ResultReg = emitAddSub_rs(/*UseAdd=*/false, VT, ZeroReg, /*IsKill=*/true,
                              SelectReg, /*IsKill=*/true, AArch64_AM::ASR, Lg2);
  else
    ResultReg = emitASR_ri(VT, VT, SelectReg, /*IsKill=*/true, Lg2);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

/// This is mostly a copy of the existing FastISel getRegForGEPIndex code. We
/// have to duplicate it for AArch64, because otherwise we would fail during
/// the sign-extend emission.
std::pair<unsigned, bool> AArch64FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = emitIntExt(IdxVT.getSimpleVT(), IdxN, PtrVT, /*isZExt=*/false);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT))
    llvm_unreachable("AArch64 FastISel doesn't support types larger than i64");
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

/// This is mostly a copy of the existing FastISel GEP code, but we have to
/// duplicate it for AArch64, because otherwise we would bail out even for
/// simple cases. This is because the standard fastEmit functions don't cover
/// MUL at all and ADD is lowered very inefficiently.
bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (!N)
    return false;
  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (auto *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      // N = N + Offset
      if (Field)
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        TotalOffs +=
            DL.getTypeAllocSize(Ty) * cast<ConstantInt>(CI)->getSExtValue();
        continue;
      }
      if (TotalOffs) {
        N = emitAdd_ri_(VT, N, NIsKill, TotalOffs);
        if (!N)
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN)
        return false;

      if (ElementSize != 1) {
        unsigned C = fastEmit_i(VT, VT, ISD::Constant, ElementSize);
        if (!C)
          return false;
        IdxN = emitMul_rr(VT, IdxN, IdxNIsKill, C, true);
        if (!IdxN)
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N)
        return false;
    }
  }
  if (TotalOffs) {
    N = emitAdd_ri_(VT, N, NIsKill, TotalOffs);
    if (!N)
      return false;
  }
  updateValueMap(I, N);
  return true;
}

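// cmpxchg reaches FastISel only at -O0 (AtomicExpand legalizes it otherwise);
// it is selected as a CMP_SWAP_32/64 pseudo followed by a SUBS and a CSINC
// that materializes the i1 success flag, producing the {value, success} pair
// in consecutive result registers.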
bool AArch64FastISel::selectAtomicCmpXchg(const AtomicCmpXchgInst *I) {
  assert(TM.getOptLevel() == CodeGenOpt::None &&
         "cmpxchg survived AtomicExpand at optlevel > -O0");

  auto *RetPairTy = cast<StructType>(I->getType());
  Type *RetTy = RetPairTy->getTypeAtIndex(0U);
  assert(RetPairTy->getTypeAtIndex(1U)->isIntegerTy(1) &&
         "cmpxchg has a non-i1 status result");

  MVT VT;
  if (!isTypeLegal(RetTy, VT))
    return false;

  const TargetRegisterClass *ResRC;
  unsigned Opc, CmpOpc;
  // This only supports i32/i64, because i8/i16 aren't legal, and the generic
  // extractvalue selection doesn't support that.
  if (VT == MVT::i32) {
    Opc = AArch64::CMP_SWAP_32;
    CmpOpc = AArch64::SUBSWrs;
    ResRC = &AArch64::GPR32RegClass;
  } else if (VT == MVT::i64) {
    Opc = AArch64::CMP_SWAP_64;
    CmpOpc = AArch64::SUBSXrs;
    ResRC = &AArch64::GPR64RegClass;
  } else {
    return false;
  }

  const MCInstrDesc &II = TII.get(Opc);

  const unsigned AddrReg = constrainOperandRegClass(
      II, getRegForValue(I->getPointerOperand()), II.getNumDefs());
  const unsigned DesiredReg = constrainOperandRegClass(
      II, getRegForValue(I->getCompareOperand()), II.getNumDefs() + 1);
  const unsigned NewReg = constrainOperandRegClass(
      II, getRegForValue(I->getNewValOperand()), II.getNumDefs() + 2);

  const unsigned ResultReg1 = createResultReg(ResRC);
  const unsigned ResultReg2 = createResultReg(&AArch64::GPR32RegClass);
  const unsigned ScratchReg = createResultReg(&AArch64::GPR32RegClass);

  // FIXME: MachineMemOperand doesn't support cmpxchg yet.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addDef(ResultReg1)
      .addDef(ScratchReg)
      .addUse(AddrReg)
      .addUse(DesiredReg)
      .addUse(NewReg);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
      .addDef(VT == MVT::i32 ? AArch64::WZR : AArch64::XZR)
      .addUse(ResultReg1)
      .addUse(DesiredReg)
      .addImm(0);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr))
      .addDef(ResultReg2)
      .addUse(AArch64::WZR)
      .addUse(AArch64::WZR)
      .addImm(AArch64CC::NE);

  assert((ResultReg1 + 1) == ResultReg2 && "Nonconsecutive result registers.");
  updateValueMap(I, ResultReg1, 2);
  return true;
}

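// Main FastISel dispatch for AArch64. For a few opcodes (srem/urem, bitcast
// and several casts) the target-independent selection is tried first and the
// target-specific handlers above act as a fallback; anything still unhandled
// falls through to selectOperator.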
bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Add:
  case Instruction::Sub:
    return selectAddSub(I);
  case Instruction::Mul:
    return selectMul(I);
  case Instruction::SDiv:
    return selectSDiv(I);
  case Instruction::SRem:
    if (!selectBinaryOp(I, ISD::SREM))
      return selectRem(I, ISD::SREM);
    return true;
  case Instruction::URem:
    if (!selectBinaryOp(I, ISD::UREM))
      return selectRem(I, ISD::UREM);
    return true;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return selectShift(I);
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return selectLogicalOp(I);
  case Instruction::Br:
    return selectBranch(I);
  case Instruction::IndirectBr:
    return selectIndirectBr(I);
  case Instruction::BitCast:
    if (!FastISel::selectBitCast(I))
      return selectBitCast(I);
    return true;
  case Instruction::FPToSI:
    if (!selectCast(I, ISD::FP_TO_SINT))
      return selectFPToInt(I, /*Signed=*/true);
    return true;
  case Instruction::FPToUI:
    return selectFPToInt(I, /*Signed=*/false);
  case Instruction::ZExt:
  case Instruction::SExt:
    return selectIntExt(I);
  case Instruction::Trunc:
    if (!selectCast(I, ISD::TRUNCATE))
      return selectTrunc(I);
    return true;
  case Instruction::FPExt:
    return selectFPExt(I);
  case Instruction::FPTrunc:
    return selectFPTrunc(I);
  case Instruction::SIToFP:
    if (!selectCast(I, ISD::SINT_TO_FP))
      return selectIntToFP(I, /*Signed=*/true);
    return true;
  case Instruction::UIToFP:
    return selectIntToFP(I, /*Signed=*/false);
  case Instruction::Load:
    return selectLoad(I);
  case Instruction::Store:
    return selectStore(I);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return selectCmp(I);
  case Instruction::Select:
    return selectSelect(I);
  case Instruction::Ret:
    return selectRet(I);
  case Instruction::FRem:
    return selectFRem(I);
  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);
  case Instruction::AtomicCmpXchg:
    return selectAtomicCmpXchg(cast<AtomicCmpXchgInst>(I));
  }

  // Fall back to target-independent instruction selection.
  return selectOperator(I, I->getOpcode());
}

namespace llvm {

FastISel *AArch64::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) {
  return new AArch64FastISel(FuncInfo, LibInfo);
}

} // end namespace llvm