1 //===- AArch6464FastISel.cpp - AArch64 FastISel implementation ------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file defines the AArch64-specific support for the FastISel class. Some
10 // of the target-specific code is generated by tablegen in the file
11 // AArch64GenFastISel.inc, which is #included here.
13 //===----------------------------------------------------------------------===//
16 #include "AArch64CallingConvention.h"
17 #include "AArch64MachineFunctionInfo.h"
18 #include "AArch64RegisterInfo.h"
19 #include "AArch64Subtarget.h"
20 #include "MCTargetDesc/AArch64AddressingModes.h"
21 #include "Utils/AArch64BaseInfo.h"
22 #include "llvm/ADT/APFloat.h"
23 #include "llvm/ADT/APInt.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/Analysis/BranchProbabilityInfo.h"
27 #include "llvm/CodeGen/CallingConvLower.h"
28 #include "llvm/CodeGen/FastISel.h"
29 #include "llvm/CodeGen/FunctionLoweringInfo.h"
30 #include "llvm/CodeGen/ISDOpcodes.h"
31 #include "llvm/CodeGen/MachineBasicBlock.h"
32 #include "llvm/CodeGen/MachineConstantPool.h"
33 #include "llvm/CodeGen/MachineFrameInfo.h"
34 #include "llvm/CodeGen/MachineInstr.h"
35 #include "llvm/CodeGen/MachineInstrBuilder.h"
36 #include "llvm/CodeGen/MachineMemOperand.h"
37 #include "llvm/CodeGen/MachineRegisterInfo.h"
38 #include "llvm/CodeGen/RuntimeLibcalls.h"
39 #include "llvm/CodeGen/ValueTypes.h"
40 #include "llvm/CodeGenTypes/MachineValueType.h"
41 #include "llvm/IR/Argument.h"
42 #include "llvm/IR/Attributes.h"
43 #include "llvm/IR/BasicBlock.h"
44 #include "llvm/IR/CallingConv.h"
45 #include "llvm/IR/Constant.h"
46 #include "llvm/IR/Constants.h"
47 #include "llvm/IR/DataLayout.h"
48 #include "llvm/IR/DerivedTypes.h"
49 #include "llvm/IR/Function.h"
50 #include "llvm/IR/GetElementPtrTypeIterator.h"
51 #include "llvm/IR/GlobalValue.h"
52 #include "llvm/IR/InstrTypes.h"
53 #include "llvm/IR/Instruction.h"
54 #include "llvm/IR/Instructions.h"
55 #include "llvm/IR/IntrinsicInst.h"
56 #include "llvm/IR/Intrinsics.h"
57 #include "llvm/IR/IntrinsicsAArch64.h"
58 #include "llvm/IR/Operator.h"
59 #include "llvm/IR/Type.h"
60 #include "llvm/IR/User.h"
61 #include "llvm/IR/Value.h"
62 #include "llvm/MC/MCInstrDesc.h"
63 #include "llvm/MC/MCRegisterInfo.h"
64 #include "llvm/MC/MCSymbol.h"
65 #include "llvm/Support/AtomicOrdering.h"
66 #include "llvm/Support/Casting.h"
67 #include "llvm/Support/CodeGen.h"
68 #include "llvm/Support/Compiler.h"
69 #include "llvm/Support/ErrorHandling.h"
70 #include "llvm/Support/MathExtras.h"
81 class AArch64FastISel final
: public FastISel
{
84 using BaseKind
= enum {
90 BaseKind Kind
= RegBase
;
91 AArch64_AM::ShiftExtendType ExtType
= AArch64_AM::InvalidShiftExtend
;
96 unsigned OffsetReg
= 0;
99 const GlobalValue
*GV
= nullptr;
102 Address() { Base
.Reg
= 0; }
104 void setKind(BaseKind K
) { Kind
= K
; }
105 BaseKind
getKind() const { return Kind
; }
106 void setExtendType(AArch64_AM::ShiftExtendType E
) { ExtType
= E
; }
107 AArch64_AM::ShiftExtendType
getExtendType() const { return ExtType
; }
108 bool isRegBase() const { return Kind
== RegBase
; }
109 bool isFIBase() const { return Kind
== FrameIndexBase
; }
111 void setReg(unsigned Reg
) {
112 assert(isRegBase() && "Invalid base register access!");
116 unsigned getReg() const {
117 assert(isRegBase() && "Invalid base register access!");
121 void setOffsetReg(unsigned Reg
) {
125 unsigned getOffsetReg() const {
129 void setFI(unsigned FI
) {
130 assert(isFIBase() && "Invalid base frame index access!");
134 unsigned getFI() const {
135 assert(isFIBase() && "Invalid base frame index access!");
139 void setOffset(int64_t O
) { Offset
= O
; }
140 int64_t getOffset() { return Offset
; }
141 void setShift(unsigned S
) { Shift
= S
; }
142 unsigned getShift() { return Shift
; }
144 void setGlobalValue(const GlobalValue
*G
) { GV
= G
; }
145 const GlobalValue
*getGlobalValue() { return GV
; }
148 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
149 /// make the right decision when generating code for different targets.
150 const AArch64Subtarget
*Subtarget
;
151 LLVMContext
*Context
;
153 bool fastLowerArguments() override
;
154 bool fastLowerCall(CallLoweringInfo
&CLI
) override
;
155 bool fastLowerIntrinsicCall(const IntrinsicInst
*II
) override
;
158 // Selection routines.
159 bool selectAddSub(const Instruction
*I
);
160 bool selectLogicalOp(const Instruction
*I
);
161 bool selectLoad(const Instruction
*I
);
162 bool selectStore(const Instruction
*I
);
163 bool selectBranch(const Instruction
*I
);
164 bool selectIndirectBr(const Instruction
*I
);
165 bool selectCmp(const Instruction
*I
);
166 bool selectSelect(const Instruction
*I
);
167 bool selectFPExt(const Instruction
*I
);
168 bool selectFPTrunc(const Instruction
*I
);
169 bool selectFPToInt(const Instruction
*I
, bool Signed
);
170 bool selectIntToFP(const Instruction
*I
, bool Signed
);
171 bool selectRem(const Instruction
*I
, unsigned ISDOpcode
);
172 bool selectRet(const Instruction
*I
);
173 bool selectTrunc(const Instruction
*I
);
174 bool selectIntExt(const Instruction
*I
);
175 bool selectMul(const Instruction
*I
);
176 bool selectShift(const Instruction
*I
);
177 bool selectBitCast(const Instruction
*I
);
178 bool selectFRem(const Instruction
*I
);
179 bool selectSDiv(const Instruction
*I
);
180 bool selectGetElementPtr(const Instruction
*I
);
181 bool selectAtomicCmpXchg(const AtomicCmpXchgInst
*I
);
183 // Utility helper routines.
184 bool isTypeLegal(Type
*Ty
, MVT
&VT
);
185 bool isTypeSupported(Type
*Ty
, MVT
&VT
, bool IsVectorAllowed
= false);
186 bool isValueAvailable(const Value
*V
) const;
187 bool computeAddress(const Value
*Obj
, Address
&Addr
, Type
*Ty
= nullptr);
188 bool computeCallAddress(const Value
*V
, Address
&Addr
);
189 bool simplifyAddress(Address
&Addr
, MVT VT
);
190 void addLoadStoreOperands(Address
&Addr
, const MachineInstrBuilder
&MIB
,
191 MachineMemOperand::Flags Flags
,
192 unsigned ScaleFactor
, MachineMemOperand
*MMO
);
193 bool isMemCpySmall(uint64_t Len
, MaybeAlign Alignment
);
194 bool tryEmitSmallMemCpy(Address Dest
, Address Src
, uint64_t Len
,
195 MaybeAlign Alignment
);
196 bool foldXALUIntrinsic(AArch64CC::CondCode
&CC
, const Instruction
*I
,
198 bool optimizeIntExtLoad(const Instruction
*I
, MVT RetVT
, MVT SrcVT
);
199 bool optimizeSelect(const SelectInst
*SI
);
200 unsigned getRegForGEPIndex(const Value
*Idx
);
202 // Emit helper routines.
203 unsigned emitAddSub(bool UseAdd
, MVT RetVT
, const Value
*LHS
,
204 const Value
*RHS
, bool SetFlags
= false,
205 bool WantResult
= true, bool IsZExt
= false);
206 unsigned emitAddSub_rr(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
207 unsigned RHSReg
, bool SetFlags
= false,
208 bool WantResult
= true);
209 unsigned emitAddSub_ri(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
210 uint64_t Imm
, bool SetFlags
= false,
211 bool WantResult
= true);
212 unsigned emitAddSub_rs(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
213 unsigned RHSReg
, AArch64_AM::ShiftExtendType ShiftType
,
214 uint64_t ShiftImm
, bool SetFlags
= false,
215 bool WantResult
= true);
216 unsigned emitAddSub_rx(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
217 unsigned RHSReg
, AArch64_AM::ShiftExtendType ExtType
,
218 uint64_t ShiftImm
, bool SetFlags
= false,
219 bool WantResult
= true);
222 bool emitCompareAndBranch(const BranchInst
*BI
);
223 bool emitCmp(const Value
*LHS
, const Value
*RHS
, bool IsZExt
);
224 bool emitICmp(MVT RetVT
, const Value
*LHS
, const Value
*RHS
, bool IsZExt
);
225 bool emitICmp_ri(MVT RetVT
, unsigned LHSReg
, uint64_t Imm
);
226 bool emitFCmp(MVT RetVT
, const Value
*LHS
, const Value
*RHS
);
227 unsigned emitLoad(MVT VT
, MVT ResultVT
, Address Addr
, bool WantZExt
= true,
228 MachineMemOperand
*MMO
= nullptr);
229 bool emitStore(MVT VT
, unsigned SrcReg
, Address Addr
,
230 MachineMemOperand
*MMO
= nullptr);
231 bool emitStoreRelease(MVT VT
, unsigned SrcReg
, unsigned AddrReg
,
232 MachineMemOperand
*MMO
= nullptr);
233 unsigned emitIntExt(MVT SrcVT
, unsigned SrcReg
, MVT DestVT
, bool isZExt
);
234 unsigned emiti1Ext(unsigned SrcReg
, MVT DestVT
, bool isZExt
);
235 unsigned emitAdd(MVT RetVT
, const Value
*LHS
, const Value
*RHS
,
236 bool SetFlags
= false, bool WantResult
= true,
237 bool IsZExt
= false);
238 unsigned emitAdd_ri_(MVT VT
, unsigned Op0
, int64_t Imm
);
239 unsigned emitSub(MVT RetVT
, const Value
*LHS
, const Value
*RHS
,
240 bool SetFlags
= false, bool WantResult
= true,
241 bool IsZExt
= false);
242 unsigned emitSubs_rr(MVT RetVT
, unsigned LHSReg
, unsigned RHSReg
,
243 bool WantResult
= true);
244 unsigned emitSubs_rs(MVT RetVT
, unsigned LHSReg
, unsigned RHSReg
,
245 AArch64_AM::ShiftExtendType ShiftType
, uint64_t ShiftImm
,
246 bool WantResult
= true);
247 unsigned emitLogicalOp(unsigned ISDOpc
, MVT RetVT
, const Value
*LHS
,
249 unsigned emitLogicalOp_ri(unsigned ISDOpc
, MVT RetVT
, unsigned LHSReg
,
251 unsigned emitLogicalOp_rs(unsigned ISDOpc
, MVT RetVT
, unsigned LHSReg
,
252 unsigned RHSReg
, uint64_t ShiftImm
);
253 unsigned emitAnd_ri(MVT RetVT
, unsigned LHSReg
, uint64_t Imm
);
254 unsigned emitMul_rr(MVT RetVT
, unsigned Op0
, unsigned Op1
);
255 unsigned emitSMULL_rr(MVT RetVT
, unsigned Op0
, unsigned Op1
);
256 unsigned emitUMULL_rr(MVT RetVT
, unsigned Op0
, unsigned Op1
);
257 unsigned emitLSL_rr(MVT RetVT
, unsigned Op0Reg
, unsigned Op1Reg
);
258 unsigned emitLSL_ri(MVT RetVT
, MVT SrcVT
, unsigned Op0Reg
, uint64_t Imm
,
260 unsigned emitLSR_rr(MVT RetVT
, unsigned Op0Reg
, unsigned Op1Reg
);
261 unsigned emitLSR_ri(MVT RetVT
, MVT SrcVT
, unsigned Op0Reg
, uint64_t Imm
,
263 unsigned emitASR_rr(MVT RetVT
, unsigned Op0Reg
, unsigned Op1Reg
);
264 unsigned emitASR_ri(MVT RetVT
, MVT SrcVT
, unsigned Op0Reg
, uint64_t Imm
,
265 bool IsZExt
= false);
267 unsigned materializeInt(const ConstantInt
*CI
, MVT VT
);
268 unsigned materializeFP(const ConstantFP
*CFP
, MVT VT
);
269 unsigned materializeGV(const GlobalValue
*GV
);
271 // Call handling routines.
273 CCAssignFn
*CCAssignFnForCall(CallingConv::ID CC
) const;
274 bool processCallArgs(CallLoweringInfo
&CLI
, SmallVectorImpl
<MVT
> &ArgVTs
,
276 bool finishCall(CallLoweringInfo
&CLI
, unsigned NumBytes
);
279 // Backend specific FastISel code.
280 unsigned fastMaterializeAlloca(const AllocaInst
*AI
) override
;
281 unsigned fastMaterializeConstant(const Constant
*C
) override
;
282 unsigned fastMaterializeFloatZero(const ConstantFP
* CF
) override
;
284 explicit AArch64FastISel(FunctionLoweringInfo
&FuncInfo
,
285 const TargetLibraryInfo
*LibInfo
)
286 : FastISel(FuncInfo
, LibInfo
, /*SkipTargetIndependentISel=*/true) {
287 Subtarget
= &FuncInfo
.MF
->getSubtarget
<AArch64Subtarget
>();
288 Context
= &FuncInfo
.Fn
->getContext();
291 bool fastSelectInstruction(const Instruction
*I
) override
;
293 #include "AArch64GenFastISel.inc"
296 } // end anonymous namespace
298 /// Check if the sign-/zero-extend will be a noop.
299 static bool isIntExtFree(const Instruction
*I
) {
300 assert((isa
<ZExtInst
>(I
) || isa
<SExtInst
>(I
)) &&
301 "Unexpected integer extend instruction.");
302 assert(!I
->getType()->isVectorTy() && I
->getType()->isIntegerTy() &&
303 "Unexpected value type.");
304 bool IsZExt
= isa
<ZExtInst
>(I
);
306 if (const auto *LI
= dyn_cast
<LoadInst
>(I
->getOperand(0)))
310 if (const auto *Arg
= dyn_cast
<Argument
>(I
->getOperand(0)))
311 if ((IsZExt
&& Arg
->hasZExtAttr()) || (!IsZExt
&& Arg
->hasSExtAttr()))
317 /// Determine the implicit scale factor that is applied by a memory
318 /// operation for a given value type.
319 static unsigned getImplicitScaleFactor(MVT VT
) {
320 switch (VT
.SimpleTy
) {
323 case MVT::i1
: // fall-through
328 case MVT::i32
: // fall-through
331 case MVT::i64
: // fall-through
337 CCAssignFn
*AArch64FastISel::CCAssignFnForCall(CallingConv::ID CC
) const {
338 if (CC
== CallingConv::GHC
)
339 return CC_AArch64_GHC
;
340 if (CC
== CallingConv::CFGuard_Check
)
341 return CC_AArch64_Win64_CFGuard_Check
;
342 if (Subtarget
->isTargetDarwin())
343 return CC_AArch64_DarwinPCS
;
344 if (Subtarget
->isTargetWindows())
345 return CC_AArch64_Win64PCS
;
346 return CC_AArch64_AAPCS
;
349 unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst
*AI
) {
350 assert(TLI
.getValueType(DL
, AI
->getType(), true) == MVT::i64
&&
351 "Alloca should always return a pointer.");
353 // Don't handle dynamic allocas.
354 if (!FuncInfo
.StaticAllocaMap
.count(AI
))
357 DenseMap
<const AllocaInst
*, int>::iterator SI
=
358 FuncInfo
.StaticAllocaMap
.find(AI
);
360 if (SI
!= FuncInfo
.StaticAllocaMap
.end()) {
361 Register ResultReg
= createResultReg(&AArch64::GPR64spRegClass
);
362 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(AArch64::ADDXri
),
364 .addFrameIndex(SI
->second
)
373 unsigned AArch64FastISel::materializeInt(const ConstantInt
*CI
, MVT VT
) {
378 return fastEmit_i(VT
, VT
, ISD::Constant
, CI
->getZExtValue());
380 // Create a copy from the zero register to materialize a "0" value.
381 const TargetRegisterClass
*RC
= (VT
== MVT::i64
) ? &AArch64::GPR64RegClass
382 : &AArch64::GPR32RegClass
;
383 unsigned ZeroReg
= (VT
== MVT::i64
) ? AArch64::XZR
: AArch64::WZR
;
384 Register ResultReg
= createResultReg(RC
);
385 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(TargetOpcode::COPY
),
386 ResultReg
).addReg(ZeroReg
, getKillRegState(true));
390 unsigned AArch64FastISel::materializeFP(const ConstantFP
*CFP
, MVT VT
) {
391 // Positive zero (+0.0) has to be materialized with a fmov from the zero
392 // register, because the immediate version of fmov cannot encode zero.
393 if (CFP
->isNullValue())
394 return fastMaterializeFloatZero(CFP
);
396 if (VT
!= MVT::f32
&& VT
!= MVT::f64
)
399 const APFloat Val
= CFP
->getValueAPF();
400 bool Is64Bit
= (VT
== MVT::f64
);
401 // This checks to see if we can use FMOV instructions to materialize
402 // a constant, otherwise we have to materialize via the constant pool.
404 Is64Bit
? AArch64_AM::getFP64Imm(Val
) : AArch64_AM::getFP32Imm(Val
);
406 unsigned Opc
= Is64Bit
? AArch64::FMOVDi
: AArch64::FMOVSi
;
407 return fastEmitInst_i(Opc
, TLI
.getRegClassFor(VT
), Imm
);
410 // For the large code model materialize the FP constant in code.
411 if (TM
.getCodeModel() == CodeModel::Large
) {
412 unsigned Opc1
= Is64Bit
? AArch64::MOVi64imm
: AArch64::MOVi32imm
;
413 const TargetRegisterClass
*RC
= Is64Bit
?
414 &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
416 Register TmpReg
= createResultReg(RC
);
417 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(Opc1
), TmpReg
)
418 .addImm(CFP
->getValueAPF().bitcastToAPInt().getZExtValue());
420 Register ResultReg
= createResultReg(TLI
.getRegClassFor(VT
));
421 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
,
422 TII
.get(TargetOpcode::COPY
), ResultReg
)
423 .addReg(TmpReg
, getKillRegState(true));
428 // Materialize via constant pool. MachineConstantPool wants an explicit
430 Align Alignment
= DL
.getPrefTypeAlign(CFP
->getType());
432 unsigned CPI
= MCP
.getConstantPoolIndex(cast
<Constant
>(CFP
), Alignment
);
433 Register ADRPReg
= createResultReg(&AArch64::GPR64commonRegClass
);
434 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(AArch64::ADRP
),
435 ADRPReg
).addConstantPoolIndex(CPI
, 0, AArch64II::MO_PAGE
);
437 unsigned Opc
= Is64Bit
? AArch64::LDRDui
: AArch64::LDRSui
;
438 Register ResultReg
= createResultReg(TLI
.getRegClassFor(VT
));
439 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(Opc
), ResultReg
)
441 .addConstantPoolIndex(CPI
, 0, AArch64II::MO_PAGEOFF
| AArch64II::MO_NC
);
445 unsigned AArch64FastISel::materializeGV(const GlobalValue
*GV
) {
446 // We can't handle thread-local variables quickly yet.
447 if (GV
->isThreadLocal())
450 // MachO still uses GOT for large code-model accesses, but ELF requires
451 // movz/movk sequences, which FastISel doesn't handle yet.
452 if (!Subtarget
->useSmallAddressing() && !Subtarget
->isTargetMachO())
455 unsigned OpFlags
= Subtarget
->ClassifyGlobalReference(GV
, TM
);
457 EVT DestEVT
= TLI
.getValueType(DL
, GV
->getType(), true);
458 if (!DestEVT
.isSimple())
461 Register ADRPReg
= createResultReg(&AArch64::GPR64commonRegClass
);
464 if (OpFlags
& AArch64II::MO_GOT
) {
466 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(AArch64::ADRP
),
468 .addGlobalAddress(GV
, 0, AArch64II::MO_PAGE
| OpFlags
);
471 if (Subtarget
->isTargetILP32()) {
472 ResultReg
= createResultReg(&AArch64::GPR32RegClass
);
473 LdrOpc
= AArch64::LDRWui
;
475 ResultReg
= createResultReg(&AArch64::GPR64RegClass
);
476 LdrOpc
= AArch64::LDRXui
;
478 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(LdrOpc
),
481 .addGlobalAddress(GV
, 0, AArch64II::MO_GOT
| AArch64II::MO_PAGEOFF
|
482 AArch64II::MO_NC
| OpFlags
);
483 if (!Subtarget
->isTargetILP32())
486 // LDRWui produces a 32-bit register, but pointers in-register are 64-bits
487 // so we must extend the result on ILP32.
488 Register Result64
= createResultReg(&AArch64::GPR64RegClass
);
489 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
,
490 TII
.get(TargetOpcode::SUBREG_TO_REG
))
493 .addReg(ResultReg
, RegState::Kill
)
494 .addImm(AArch64::sub_32
);
498 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(AArch64::ADRP
),
500 .addGlobalAddress(GV
, 0, AArch64II::MO_PAGE
| OpFlags
);
502 if (OpFlags
& AArch64II::MO_TAGGED
) {
503 // MO_TAGGED on the page indicates a tagged address. Set the tag now.
504 // We do so by creating a MOVK that sets bits 48-63 of the register to
505 // (global address + 0x100000000 - PC) >> 48. This assumes that we're in
506 // the small code model so we can assume a binary size of <= 4GB, which
507 // makes the untagged PC relative offset positive. The binary must also be
508 // loaded into address range [0, 2^48). Both of these properties need to
509 // be ensured at runtime when using tagged addresses.
511 // TODO: There is duplicate logic in AArch64ExpandPseudoInsts.cpp that
512 // also uses BuildMI for making an ADRP (+ MOVK) + ADD, but the operands
513 // are not exactly 1:1 with FastISel so we cannot easily abstract this
514 // out. At some point, it would be nice to find a way to not have this
516 unsigned DstReg
= createResultReg(&AArch64::GPR64commonRegClass
);
517 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(AArch64::MOVKXi
),
520 .addGlobalAddress(GV
, /*Offset=*/0x100000000,
521 AArch64II::MO_PREL
| AArch64II::MO_G3
)
526 ResultReg
= createResultReg(&AArch64::GPR64spRegClass
);
527 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(AArch64::ADDXri
),
530 .addGlobalAddress(GV
, 0,
531 AArch64II::MO_PAGEOFF
| AArch64II::MO_NC
| OpFlags
)
537 unsigned AArch64FastISel::fastMaterializeConstant(const Constant
*C
) {
538 EVT CEVT
= TLI
.getValueType(DL
, C
->getType(), true);
540 // Only handle simple types.
541 if (!CEVT
.isSimple())
543 MVT VT
= CEVT
.getSimpleVT();
544 // arm64_32 has 32-bit pointers held in 64-bit registers. Because of that,
545 // 'null' pointers need to have a somewhat special treatment.
546 if (isa
<ConstantPointerNull
>(C
)) {
547 assert(VT
== MVT::i64
&& "Expected 64-bit pointers");
548 return materializeInt(ConstantInt::get(Type::getInt64Ty(*Context
), 0), VT
);
551 if (const auto *CI
= dyn_cast
<ConstantInt
>(C
))
552 return materializeInt(CI
, VT
);
553 else if (const ConstantFP
*CFP
= dyn_cast
<ConstantFP
>(C
))
554 return materializeFP(CFP
, VT
);
555 else if (const GlobalValue
*GV
= dyn_cast
<GlobalValue
>(C
))
556 return materializeGV(GV
);
561 unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP
* CFP
) {
562 assert(CFP
->isNullValue() &&
563 "Floating-point constant is not a positive zero.");
565 if (!isTypeLegal(CFP
->getType(), VT
))
568 if (VT
!= MVT::f32
&& VT
!= MVT::f64
)
571 bool Is64Bit
= (VT
== MVT::f64
);
572 unsigned ZReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
573 unsigned Opc
= Is64Bit
? AArch64::FMOVXDr
: AArch64::FMOVWSr
;
574 return fastEmitInst_r(Opc
, TLI
.getRegClassFor(VT
), ZReg
);
577 /// Check if the multiply is by a power-of-2 constant.
578 static bool isMulPowOf2(const Value
*I
) {
579 if (const auto *MI
= dyn_cast
<MulOperator
>(I
)) {
580 if (const auto *C
= dyn_cast
<ConstantInt
>(MI
->getOperand(0)))
581 if (C
->getValue().isPowerOf2())
583 if (const auto *C
= dyn_cast
<ConstantInt
>(MI
->getOperand(1)))
584 if (C
->getValue().isPowerOf2())
590 // Computes the address to get to an object.
591 bool AArch64FastISel::computeAddress(const Value
*Obj
, Address
&Addr
, Type
*Ty
)
593 const User
*U
= nullptr;
594 unsigned Opcode
= Instruction::UserOp1
;
595 if (const Instruction
*I
= dyn_cast
<Instruction
>(Obj
)) {
596 // Don't walk into other basic blocks unless the object is an alloca from
597 // another block, otherwise it may not have a virtual register assigned.
598 if (FuncInfo
.StaticAllocaMap
.count(static_cast<const AllocaInst
*>(Obj
)) ||
599 FuncInfo
.MBBMap
[I
->getParent()] == FuncInfo
.MBB
) {
600 Opcode
= I
->getOpcode();
603 } else if (const ConstantExpr
*C
= dyn_cast
<ConstantExpr
>(Obj
)) {
604 Opcode
= C
->getOpcode();
608 if (auto *Ty
= dyn_cast
<PointerType
>(Obj
->getType()))
609 if (Ty
->getAddressSpace() > 255)
610 // Fast instruction selection doesn't support the special
617 case Instruction::BitCast
:
618 // Look through bitcasts.
619 return computeAddress(U
->getOperand(0), Addr
, Ty
);
621 case Instruction::IntToPtr
:
622 // Look past no-op inttoptrs.
623 if (TLI
.getValueType(DL
, U
->getOperand(0)->getType()) ==
624 TLI
.getPointerTy(DL
))
625 return computeAddress(U
->getOperand(0), Addr
, Ty
);
628 case Instruction::PtrToInt
:
629 // Look past no-op ptrtoints.
630 if (TLI
.getValueType(DL
, U
->getType()) == TLI
.getPointerTy(DL
))
631 return computeAddress(U
->getOperand(0), Addr
, Ty
);
634 case Instruction::GetElementPtr
: {
635 Address SavedAddr
= Addr
;
636 uint64_t TmpOffset
= Addr
.getOffset();
638 // Iterate through the GEP folding the constants into offsets where
640 for (gep_type_iterator GTI
= gep_type_begin(U
), E
= gep_type_end(U
);
642 const Value
*Op
= GTI
.getOperand();
643 if (StructType
*STy
= GTI
.getStructTypeOrNull()) {
644 const StructLayout
*SL
= DL
.getStructLayout(STy
);
645 unsigned Idx
= cast
<ConstantInt
>(Op
)->getZExtValue();
646 TmpOffset
+= SL
->getElementOffset(Idx
);
648 uint64_t S
= GTI
.getSequentialElementStride(DL
);
650 if (const ConstantInt
*CI
= dyn_cast
<ConstantInt
>(Op
)) {
651 // Constant-offset addressing.
652 TmpOffset
+= CI
->getSExtValue() * S
;
655 if (canFoldAddIntoGEP(U
, Op
)) {
656 // A compatible add with a constant operand. Fold the constant.
658 cast
<ConstantInt
>(cast
<AddOperator
>(Op
)->getOperand(1));
659 TmpOffset
+= CI
->getSExtValue() * S
;
660 // Iterate on the other operand.
661 Op
= cast
<AddOperator
>(Op
)->getOperand(0);
665 goto unsupported_gep
;
670 // Try to grab the base operand now.
671 Addr
.setOffset(TmpOffset
);
672 if (computeAddress(U
->getOperand(0), Addr
, Ty
))
675 // We failed, restore everything and try the other options.
681 case Instruction::Alloca
: {
682 const AllocaInst
*AI
= cast
<AllocaInst
>(Obj
);
683 DenseMap
<const AllocaInst
*, int>::iterator SI
=
684 FuncInfo
.StaticAllocaMap
.find(AI
);
685 if (SI
!= FuncInfo
.StaticAllocaMap
.end()) {
686 Addr
.setKind(Address::FrameIndexBase
);
687 Addr
.setFI(SI
->second
);
692 case Instruction::Add
: {
693 // Adds of constants are common and easy enough.
694 const Value
*LHS
= U
->getOperand(0);
695 const Value
*RHS
= U
->getOperand(1);
697 if (isa
<ConstantInt
>(LHS
))
700 if (const ConstantInt
*CI
= dyn_cast
<ConstantInt
>(RHS
)) {
701 Addr
.setOffset(Addr
.getOffset() + CI
->getSExtValue());
702 return computeAddress(LHS
, Addr
, Ty
);
705 Address Backup
= Addr
;
706 if (computeAddress(LHS
, Addr
, Ty
) && computeAddress(RHS
, Addr
, Ty
))
712 case Instruction::Sub
: {
713 // Subs of constants are common and easy enough.
714 const Value
*LHS
= U
->getOperand(0);
715 const Value
*RHS
= U
->getOperand(1);
717 if (const ConstantInt
*CI
= dyn_cast
<ConstantInt
>(RHS
)) {
718 Addr
.setOffset(Addr
.getOffset() - CI
->getSExtValue());
719 return computeAddress(LHS
, Addr
, Ty
);
723 case Instruction::Shl
: {
724 if (Addr
.getOffsetReg())
727 const auto *CI
= dyn_cast
<ConstantInt
>(U
->getOperand(1));
731 unsigned Val
= CI
->getZExtValue();
732 if (Val
< 1 || Val
> 3)
735 uint64_t NumBytes
= 0;
736 if (Ty
&& Ty
->isSized()) {
737 uint64_t NumBits
= DL
.getTypeSizeInBits(Ty
);
738 NumBytes
= NumBits
/ 8;
739 if (!isPowerOf2_64(NumBits
))
743 if (NumBytes
!= (1ULL << Val
))
747 Addr
.setExtendType(AArch64_AM::LSL
);
749 const Value
*Src
= U
->getOperand(0);
750 if (const auto *I
= dyn_cast
<Instruction
>(Src
)) {
751 if (FuncInfo
.MBBMap
[I
->getParent()] == FuncInfo
.MBB
) {
752 // Fold the zext or sext when it won't become a noop.
753 if (const auto *ZE
= dyn_cast
<ZExtInst
>(I
)) {
754 if (!isIntExtFree(ZE
) &&
755 ZE
->getOperand(0)->getType()->isIntegerTy(32)) {
756 Addr
.setExtendType(AArch64_AM::UXTW
);
757 Src
= ZE
->getOperand(0);
759 } else if (const auto *SE
= dyn_cast
<SExtInst
>(I
)) {
760 if (!isIntExtFree(SE
) &&
761 SE
->getOperand(0)->getType()->isIntegerTy(32)) {
762 Addr
.setExtendType(AArch64_AM::SXTW
);
763 Src
= SE
->getOperand(0);
769 if (const auto *AI
= dyn_cast
<BinaryOperator
>(Src
))
770 if (AI
->getOpcode() == Instruction::And
) {
771 const Value
*LHS
= AI
->getOperand(0);
772 const Value
*RHS
= AI
->getOperand(1);
774 if (const auto *C
= dyn_cast
<ConstantInt
>(LHS
))
775 if (C
->getValue() == 0xffffffff)
778 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
779 if (C
->getValue() == 0xffffffff) {
780 Addr
.setExtendType(AArch64_AM::UXTW
);
781 Register Reg
= getRegForValue(LHS
);
784 Reg
= fastEmitInst_extractsubreg(MVT::i32
, Reg
, AArch64::sub_32
);
785 Addr
.setOffsetReg(Reg
);
790 Register Reg
= getRegForValue(Src
);
793 Addr
.setOffsetReg(Reg
);
796 case Instruction::Mul
: {
797 if (Addr
.getOffsetReg())
803 const Value
*LHS
= U
->getOperand(0);
804 const Value
*RHS
= U
->getOperand(1);
806 // Canonicalize power-of-2 value to the RHS.
807 if (const auto *C
= dyn_cast
<ConstantInt
>(LHS
))
808 if (C
->getValue().isPowerOf2())
811 assert(isa
<ConstantInt
>(RHS
) && "Expected an ConstantInt.");
812 const auto *C
= cast
<ConstantInt
>(RHS
);
813 unsigned Val
= C
->getValue().logBase2();
814 if (Val
< 1 || Val
> 3)
817 uint64_t NumBytes
= 0;
818 if (Ty
&& Ty
->isSized()) {
819 uint64_t NumBits
= DL
.getTypeSizeInBits(Ty
);
820 NumBytes
= NumBits
/ 8;
821 if (!isPowerOf2_64(NumBits
))
825 if (NumBytes
!= (1ULL << Val
))
829 Addr
.setExtendType(AArch64_AM::LSL
);
831 const Value
*Src
= LHS
;
832 if (const auto *I
= dyn_cast
<Instruction
>(Src
)) {
833 if (FuncInfo
.MBBMap
[I
->getParent()] == FuncInfo
.MBB
) {
834 // Fold the zext or sext when it won't become a noop.
835 if (const auto *ZE
= dyn_cast
<ZExtInst
>(I
)) {
836 if (!isIntExtFree(ZE
) &&
837 ZE
->getOperand(0)->getType()->isIntegerTy(32)) {
838 Addr
.setExtendType(AArch64_AM::UXTW
);
839 Src
= ZE
->getOperand(0);
841 } else if (const auto *SE
= dyn_cast
<SExtInst
>(I
)) {
842 if (!isIntExtFree(SE
) &&
843 SE
->getOperand(0)->getType()->isIntegerTy(32)) {
844 Addr
.setExtendType(AArch64_AM::SXTW
);
845 Src
= SE
->getOperand(0);
851 Register Reg
= getRegForValue(Src
);
854 Addr
.setOffsetReg(Reg
);
857 case Instruction::And
: {
858 if (Addr
.getOffsetReg())
861 if (!Ty
|| DL
.getTypeSizeInBits(Ty
) != 8)
864 const Value
*LHS
= U
->getOperand(0);
865 const Value
*RHS
= U
->getOperand(1);
867 if (const auto *C
= dyn_cast
<ConstantInt
>(LHS
))
868 if (C
->getValue() == 0xffffffff)
871 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
872 if (C
->getValue() == 0xffffffff) {
874 Addr
.setExtendType(AArch64_AM::LSL
);
875 Addr
.setExtendType(AArch64_AM::UXTW
);
877 Register Reg
= getRegForValue(LHS
);
880 Reg
= fastEmitInst_extractsubreg(MVT::i32
, Reg
, AArch64::sub_32
);
881 Addr
.setOffsetReg(Reg
);
886 case Instruction::SExt
:
887 case Instruction::ZExt
: {
888 if (!Addr
.getReg() || Addr
.getOffsetReg())
891 const Value
*Src
= nullptr;
892 // Fold the zext or sext when it won't become a noop.
893 if (const auto *ZE
= dyn_cast
<ZExtInst
>(U
)) {
894 if (!isIntExtFree(ZE
) && ZE
->getOperand(0)->getType()->isIntegerTy(32)) {
895 Addr
.setExtendType(AArch64_AM::UXTW
);
896 Src
= ZE
->getOperand(0);
898 } else if (const auto *SE
= dyn_cast
<SExtInst
>(U
)) {
899 if (!isIntExtFree(SE
) && SE
->getOperand(0)->getType()->isIntegerTy(32)) {
900 Addr
.setExtendType(AArch64_AM::SXTW
);
901 Src
= SE
->getOperand(0);
909 Register Reg
= getRegForValue(Src
);
912 Addr
.setOffsetReg(Reg
);
917 if (Addr
.isRegBase() && !Addr
.getReg()) {
918 Register Reg
= getRegForValue(Obj
);
925 if (!Addr
.getOffsetReg()) {
926 Register Reg
= getRegForValue(Obj
);
929 Addr
.setOffsetReg(Reg
);
936 bool AArch64FastISel::computeCallAddress(const Value
*V
, Address
&Addr
) {
937 const User
*U
= nullptr;
938 unsigned Opcode
= Instruction::UserOp1
;
941 if (const auto *I
= dyn_cast
<Instruction
>(V
)) {
942 Opcode
= I
->getOpcode();
944 InMBB
= I
->getParent() == FuncInfo
.MBB
->getBasicBlock();
945 } else if (const auto *C
= dyn_cast
<ConstantExpr
>(V
)) {
946 Opcode
= C
->getOpcode();
952 case Instruction::BitCast
:
953 // Look past bitcasts if its operand is in the same BB.
955 return computeCallAddress(U
->getOperand(0), Addr
);
957 case Instruction::IntToPtr
:
958 // Look past no-op inttoptrs if its operand is in the same BB.
960 TLI
.getValueType(DL
, U
->getOperand(0)->getType()) ==
961 TLI
.getPointerTy(DL
))
962 return computeCallAddress(U
->getOperand(0), Addr
);
964 case Instruction::PtrToInt
:
965 // Look past no-op ptrtoints if its operand is in the same BB.
966 if (InMBB
&& TLI
.getValueType(DL
, U
->getType()) == TLI
.getPointerTy(DL
))
967 return computeCallAddress(U
->getOperand(0), Addr
);
971 if (const GlobalValue
*GV
= dyn_cast
<GlobalValue
>(V
)) {
972 Addr
.setGlobalValue(GV
);
976 // If all else fails, try to materialize the value in a register.
977 if (!Addr
.getGlobalValue()) {
978 Addr
.setReg(getRegForValue(V
));
979 return Addr
.getReg() != 0;
985 bool AArch64FastISel::isTypeLegal(Type
*Ty
, MVT
&VT
) {
986 EVT evt
= TLI
.getValueType(DL
, Ty
, true);
988 if (Subtarget
->isTargetILP32() && Ty
->isPointerTy())
991 // Only handle simple types.
992 if (evt
== MVT::Other
|| !evt
.isSimple())
994 VT
= evt
.getSimpleVT();
996 // This is a legal type, but it's not something we handle in fast-isel.
1000 // Handle all other legal types, i.e. a register that will directly hold this
1002 return TLI
.isTypeLegal(VT
);
1005 /// Determine if the value type is supported by FastISel.
1007 /// FastISel for AArch64 can handle more value types than are legal. This adds
1008 /// simple value type such as i1, i8, and i16.
1009 bool AArch64FastISel::isTypeSupported(Type
*Ty
, MVT
&VT
, bool IsVectorAllowed
) {
1010 if (Ty
->isVectorTy() && !IsVectorAllowed
)
1013 if (isTypeLegal(Ty
, VT
))
1016 // If this is a type than can be sign or zero-extended to a basic operation
1017 // go ahead and accept it now.
1018 if (VT
== MVT::i1
|| VT
== MVT::i8
|| VT
== MVT::i16
)
1024 bool AArch64FastISel::isValueAvailable(const Value
*V
) const {
1025 if (!isa
<Instruction
>(V
))
1028 const auto *I
= cast
<Instruction
>(V
);
1029 return FuncInfo
.MBBMap
[I
->getParent()] == FuncInfo
.MBB
;
1032 bool AArch64FastISel::simplifyAddress(Address
&Addr
, MVT VT
) {
1033 if (Subtarget
->isTargetILP32())
1036 unsigned ScaleFactor
= getImplicitScaleFactor(VT
);
1040 bool ImmediateOffsetNeedsLowering
= false;
1041 bool RegisterOffsetNeedsLowering
= false;
1042 int64_t Offset
= Addr
.getOffset();
1043 if (((Offset
< 0) || (Offset
& (ScaleFactor
- 1))) && !isInt
<9>(Offset
))
1044 ImmediateOffsetNeedsLowering
= true;
1045 else if (Offset
> 0 && !(Offset
& (ScaleFactor
- 1)) &&
1046 !isUInt
<12>(Offset
/ ScaleFactor
))
1047 ImmediateOffsetNeedsLowering
= true;
1049 // Cannot encode an offset register and an immediate offset in the same
1050 // instruction. Fold the immediate offset into the load/store instruction and
1051 // emit an additional add to take care of the offset register.
1052 if (!ImmediateOffsetNeedsLowering
&& Addr
.getOffset() && Addr
.getOffsetReg())
1053 RegisterOffsetNeedsLowering
= true;
1055 // Cannot encode zero register as base.
1056 if (Addr
.isRegBase() && Addr
.getOffsetReg() && !Addr
.getReg())
1057 RegisterOffsetNeedsLowering
= true;
1059 // If this is a stack pointer and the offset needs to be simplified then put
1060 // the alloca address into a register, set the base type back to register and
1061 // continue. This should almost never happen.
1062 if ((ImmediateOffsetNeedsLowering
|| Addr
.getOffsetReg()) && Addr
.isFIBase())
1064 Register ResultReg
= createResultReg(&AArch64::GPR64spRegClass
);
1065 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(AArch64::ADDXri
),
1067 .addFrameIndex(Addr
.getFI())
1070 Addr
.setKind(Address::RegBase
);
1071 Addr
.setReg(ResultReg
);
1074 if (RegisterOffsetNeedsLowering
) {
1075 unsigned ResultReg
= 0;
1076 if (Addr
.getReg()) {
1077 if (Addr
.getExtendType() == AArch64_AM::SXTW
||
1078 Addr
.getExtendType() == AArch64_AM::UXTW
)
1079 ResultReg
= emitAddSub_rx(/*UseAdd=*/true, MVT::i64
, Addr
.getReg(),
1080 Addr
.getOffsetReg(), Addr
.getExtendType(),
1083 ResultReg
= emitAddSub_rs(/*UseAdd=*/true, MVT::i64
, Addr
.getReg(),
1084 Addr
.getOffsetReg(), AArch64_AM::LSL
,
1087 if (Addr
.getExtendType() == AArch64_AM::UXTW
)
1088 ResultReg
= emitLSL_ri(MVT::i64
, MVT::i32
, Addr
.getOffsetReg(),
1089 Addr
.getShift(), /*IsZExt=*/true);
1090 else if (Addr
.getExtendType() == AArch64_AM::SXTW
)
1091 ResultReg
= emitLSL_ri(MVT::i64
, MVT::i32
, Addr
.getOffsetReg(),
1092 Addr
.getShift(), /*IsZExt=*/false);
1094 ResultReg
= emitLSL_ri(MVT::i64
, MVT::i64
, Addr
.getOffsetReg(),
1100 Addr
.setReg(ResultReg
);
1101 Addr
.setOffsetReg(0);
1103 Addr
.setExtendType(AArch64_AM::InvalidShiftExtend
);
1106 // Since the offset is too large for the load/store instruction get the
1107 // reg+offset into a register.
1108 if (ImmediateOffsetNeedsLowering
) {
1111 // Try to fold the immediate into the add instruction.
1112 ResultReg
= emitAdd_ri_(MVT::i64
, Addr
.getReg(), Offset
);
1114 ResultReg
= fastEmit_i(MVT::i64
, MVT::i64
, ISD::Constant
, Offset
);
1118 Addr
.setReg(ResultReg
);
1124 void AArch64FastISel::addLoadStoreOperands(Address
&Addr
,
1125 const MachineInstrBuilder
&MIB
,
1126 MachineMemOperand::Flags Flags
,
1127 unsigned ScaleFactor
,
1128 MachineMemOperand
*MMO
) {
1129 int64_t Offset
= Addr
.getOffset() / ScaleFactor
;
1130 // Frame base works a bit differently. Handle it separately.
1131 if (Addr
.isFIBase()) {
1132 int FI
= Addr
.getFI();
1133 // FIXME: We shouldn't be using getObjectSize/getObjectAlignment. The size
1134 // and alignment should be based on the VT.
1135 MMO
= FuncInfo
.MF
->getMachineMemOperand(
1136 MachinePointerInfo::getFixedStack(*FuncInfo
.MF
, FI
, Offset
), Flags
,
1137 MFI
.getObjectSize(FI
), MFI
.getObjectAlign(FI
));
1138 // Now add the rest of the operands.
1139 MIB
.addFrameIndex(FI
).addImm(Offset
);
1141 assert(Addr
.isRegBase() && "Unexpected address kind.");
1142 const MCInstrDesc
&II
= MIB
->getDesc();
1143 unsigned Idx
= (Flags
& MachineMemOperand::MOStore
) ? 1 : 0;
1145 constrainOperandRegClass(II
, Addr
.getReg(), II
.getNumDefs()+Idx
));
1147 constrainOperandRegClass(II
, Addr
.getOffsetReg(), II
.getNumDefs()+Idx
+1));
1148 if (Addr
.getOffsetReg()) {
1149 assert(Addr
.getOffset() == 0 && "Unexpected offset");
1150 bool IsSigned
= Addr
.getExtendType() == AArch64_AM::SXTW
||
1151 Addr
.getExtendType() == AArch64_AM::SXTX
;
1152 MIB
.addReg(Addr
.getReg());
1153 MIB
.addReg(Addr
.getOffsetReg());
1154 MIB
.addImm(IsSigned
);
1155 MIB
.addImm(Addr
.getShift() != 0);
1157 MIB
.addReg(Addr
.getReg()).addImm(Offset
);
1161 MIB
.addMemOperand(MMO
);
1164 unsigned AArch64FastISel::emitAddSub(bool UseAdd
, MVT RetVT
, const Value
*LHS
,
1165 const Value
*RHS
, bool SetFlags
,
1166 bool WantResult
, bool IsZExt
) {
1167 AArch64_AM::ShiftExtendType ExtendType
= AArch64_AM::InvalidShiftExtend
;
1168 bool NeedExtend
= false;
1169 switch (RetVT
.SimpleTy
) {
1177 ExtendType
= IsZExt
? AArch64_AM::UXTB
: AArch64_AM::SXTB
;
1181 ExtendType
= IsZExt
? AArch64_AM::UXTH
: AArch64_AM::SXTH
;
1183 case MVT::i32
: // fall-through
1188 RetVT
.SimpleTy
= std::max(RetVT
.SimpleTy
, MVT::i32
);
1190 // Canonicalize immediates to the RHS first.
1191 if (UseAdd
&& isa
<Constant
>(LHS
) && !isa
<Constant
>(RHS
))
1192 std::swap(LHS
, RHS
);
1194 // Canonicalize mul by power of 2 to the RHS.
1195 if (UseAdd
&& LHS
->hasOneUse() && isValueAvailable(LHS
))
1196 if (isMulPowOf2(LHS
))
1197 std::swap(LHS
, RHS
);
1199 // Canonicalize shift immediate to the RHS.
1200 if (UseAdd
&& LHS
->hasOneUse() && isValueAvailable(LHS
))
1201 if (const auto *SI
= dyn_cast
<BinaryOperator
>(LHS
))
1202 if (isa
<ConstantInt
>(SI
->getOperand(1)))
1203 if (SI
->getOpcode() == Instruction::Shl
||
1204 SI
->getOpcode() == Instruction::LShr
||
1205 SI
->getOpcode() == Instruction::AShr
)
1206 std::swap(LHS
, RHS
);
1208 Register LHSReg
= getRegForValue(LHS
);
1213 LHSReg
= emitIntExt(SrcVT
, LHSReg
, RetVT
, IsZExt
);
1215 unsigned ResultReg
= 0;
1216 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
)) {
1217 uint64_t Imm
= IsZExt
? C
->getZExtValue() : C
->getSExtValue();
1218 if (C
->isNegative())
1219 ResultReg
= emitAddSub_ri(!UseAdd
, RetVT
, LHSReg
, -Imm
, SetFlags
,
1222 ResultReg
= emitAddSub_ri(UseAdd
, RetVT
, LHSReg
, Imm
, SetFlags
,
1224 } else if (const auto *C
= dyn_cast
<Constant
>(RHS
))
1225 if (C
->isNullValue())
1226 ResultReg
= emitAddSub_ri(UseAdd
, RetVT
, LHSReg
, 0, SetFlags
, WantResult
);
1231 // Only extend the RHS within the instruction if there is a valid extend type.
1232 if (ExtendType
!= AArch64_AM::InvalidShiftExtend
&& RHS
->hasOneUse() &&
1233 isValueAvailable(RHS
)) {
1234 Register RHSReg
= getRegForValue(RHS
);
1237 return emitAddSub_rx(UseAdd
, RetVT
, LHSReg
, RHSReg
, ExtendType
, 0,
1238 SetFlags
, WantResult
);
1241 // Check if the mul can be folded into the instruction.
1242 if (RHS
->hasOneUse() && isValueAvailable(RHS
)) {
1243 if (isMulPowOf2(RHS
)) {
1244 const Value
*MulLHS
= cast
<MulOperator
>(RHS
)->getOperand(0);
1245 const Value
*MulRHS
= cast
<MulOperator
>(RHS
)->getOperand(1);
1247 if (const auto *C
= dyn_cast
<ConstantInt
>(MulLHS
))
1248 if (C
->getValue().isPowerOf2())
1249 std::swap(MulLHS
, MulRHS
);
1251 assert(isa
<ConstantInt
>(MulRHS
) && "Expected a ConstantInt.");
1252 uint64_t ShiftVal
= cast
<ConstantInt
>(MulRHS
)->getValue().logBase2();
1253 Register RHSReg
= getRegForValue(MulLHS
);
1256 ResultReg
= emitAddSub_rs(UseAdd
, RetVT
, LHSReg
, RHSReg
, AArch64_AM::LSL
,
1257 ShiftVal
, SetFlags
, WantResult
);
1263 // Check if the shift can be folded into the instruction.
1264 if (RHS
->hasOneUse() && isValueAvailable(RHS
)) {
1265 if (const auto *SI
= dyn_cast
<BinaryOperator
>(RHS
)) {
1266 if (const auto *C
= dyn_cast
<ConstantInt
>(SI
->getOperand(1))) {
1267 AArch64_AM::ShiftExtendType ShiftType
= AArch64_AM::InvalidShiftExtend
;
1268 switch (SI
->getOpcode()) {
1270 case Instruction::Shl
: ShiftType
= AArch64_AM::LSL
; break;
1271 case Instruction::LShr
: ShiftType
= AArch64_AM::LSR
; break;
1272 case Instruction::AShr
: ShiftType
= AArch64_AM::ASR
; break;
1274 uint64_t ShiftVal
= C
->getZExtValue();
1275 if (ShiftType
!= AArch64_AM::InvalidShiftExtend
) {
1276 Register RHSReg
= getRegForValue(SI
->getOperand(0));
1279 ResultReg
= emitAddSub_rs(UseAdd
, RetVT
, LHSReg
, RHSReg
, ShiftType
,
1280 ShiftVal
, SetFlags
, WantResult
);
1288 Register RHSReg
= getRegForValue(RHS
);
1293 RHSReg
= emitIntExt(SrcVT
, RHSReg
, RetVT
, IsZExt
);
1295 return emitAddSub_rr(UseAdd
, RetVT
, LHSReg
, RHSReg
, SetFlags
, WantResult
);
1298 unsigned AArch64FastISel::emitAddSub_rr(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
1299 unsigned RHSReg
, bool SetFlags
,
1301 assert(LHSReg
&& RHSReg
&& "Invalid register number.");
1303 if (LHSReg
== AArch64::SP
|| LHSReg
== AArch64::WSP
||
1304 RHSReg
== AArch64::SP
|| RHSReg
== AArch64::WSP
)
1307 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
1310 static const unsigned OpcTable
[2][2][2] = {
1311 { { AArch64::SUBWrr
, AArch64::SUBXrr
},
1312 { AArch64::ADDWrr
, AArch64::ADDXrr
} },
1313 { { AArch64::SUBSWrr
, AArch64::SUBSXrr
},
1314 { AArch64::ADDSWrr
, AArch64::ADDSXrr
} }
1316 bool Is64Bit
= RetVT
== MVT::i64
;
1317 unsigned Opc
= OpcTable
[SetFlags
][UseAdd
][Is64Bit
];
1318 const TargetRegisterClass
*RC
=
1319 Is64Bit
? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1322 ResultReg
= createResultReg(RC
);
1324 ResultReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
1326 const MCInstrDesc
&II
= TII
.get(Opc
);
1327 LHSReg
= constrainOperandRegClass(II
, LHSReg
, II
.getNumDefs());
1328 RHSReg
= constrainOperandRegClass(II
, RHSReg
, II
.getNumDefs() + 1);
1329 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, II
, ResultReg
)
1335 unsigned AArch64FastISel::emitAddSub_ri(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
1336 uint64_t Imm
, bool SetFlags
,
1338 assert(LHSReg
&& "Invalid register number.");
1340 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
1344 if (isUInt
<12>(Imm
))
1346 else if ((Imm
& 0xfff000) == Imm
) {
1352 static const unsigned OpcTable
[2][2][2] = {
1353 { { AArch64::SUBWri
, AArch64::SUBXri
},
1354 { AArch64::ADDWri
, AArch64::ADDXri
} },
1355 { { AArch64::SUBSWri
, AArch64::SUBSXri
},
1356 { AArch64::ADDSWri
, AArch64::ADDSXri
} }
1358 bool Is64Bit
= RetVT
== MVT::i64
;
1359 unsigned Opc
= OpcTable
[SetFlags
][UseAdd
][Is64Bit
];
1360 const TargetRegisterClass
*RC
;
1362 RC
= Is64Bit
? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1364 RC
= Is64Bit
? &AArch64::GPR64spRegClass
: &AArch64::GPR32spRegClass
;
1367 ResultReg
= createResultReg(RC
);
1369 ResultReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
1371 const MCInstrDesc
&II
= TII
.get(Opc
);
1372 LHSReg
= constrainOperandRegClass(II
, LHSReg
, II
.getNumDefs());
1373 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, II
, ResultReg
)
1376 .addImm(getShifterImm(AArch64_AM::LSL
, ShiftImm
));
1380 unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
1382 AArch64_AM::ShiftExtendType ShiftType
,
1383 uint64_t ShiftImm
, bool SetFlags
,
1385 assert(LHSReg
&& RHSReg
&& "Invalid register number.");
1386 assert(LHSReg
!= AArch64::SP
&& LHSReg
!= AArch64::WSP
&&
1387 RHSReg
!= AArch64::SP
&& RHSReg
!= AArch64::WSP
);
1389 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
1392 // Don't deal with undefined shifts.
1393 if (ShiftImm
>= RetVT
.getSizeInBits())
1396 static const unsigned OpcTable
[2][2][2] = {
1397 { { AArch64::SUBWrs
, AArch64::SUBXrs
},
1398 { AArch64::ADDWrs
, AArch64::ADDXrs
} },
1399 { { AArch64::SUBSWrs
, AArch64::SUBSXrs
},
1400 { AArch64::ADDSWrs
, AArch64::ADDSXrs
} }
1402 bool Is64Bit
= RetVT
== MVT::i64
;
1403 unsigned Opc
= OpcTable
[SetFlags
][UseAdd
][Is64Bit
];
1404 const TargetRegisterClass
*RC
=
1405 Is64Bit
? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1408 ResultReg
= createResultReg(RC
);
1410 ResultReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
1412 const MCInstrDesc
&II
= TII
.get(Opc
);
1413 LHSReg
= constrainOperandRegClass(II
, LHSReg
, II
.getNumDefs());
1414 RHSReg
= constrainOperandRegClass(II
, RHSReg
, II
.getNumDefs() + 1);
1415 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, II
, ResultReg
)
1418 .addImm(getShifterImm(ShiftType
, ShiftImm
));
1422 unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd
, MVT RetVT
, unsigned LHSReg
,
1424 AArch64_AM::ShiftExtendType ExtType
,
1425 uint64_t ShiftImm
, bool SetFlags
,
1427 assert(LHSReg
&& RHSReg
&& "Invalid register number.");
1428 assert(LHSReg
!= AArch64::XZR
&& LHSReg
!= AArch64::WZR
&&
1429 RHSReg
!= AArch64::XZR
&& RHSReg
!= AArch64::WZR
);
1431 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
1437 static const unsigned OpcTable
[2][2][2] = {
1438 { { AArch64::SUBWrx
, AArch64::SUBXrx
},
1439 { AArch64::ADDWrx
, AArch64::ADDXrx
} },
1440 { { AArch64::SUBSWrx
, AArch64::SUBSXrx
},
1441 { AArch64::ADDSWrx
, AArch64::ADDSXrx
} }
1443 bool Is64Bit
= RetVT
== MVT::i64
;
1444 unsigned Opc
= OpcTable
[SetFlags
][UseAdd
][Is64Bit
];
1445 const TargetRegisterClass
*RC
= nullptr;
1447 RC
= Is64Bit
? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1449 RC
= Is64Bit
? &AArch64::GPR64spRegClass
: &AArch64::GPR32spRegClass
;
1452 ResultReg
= createResultReg(RC
);
1454 ResultReg
= Is64Bit
? AArch64::XZR
: AArch64::WZR
;
1456 const MCInstrDesc
&II
= TII
.get(Opc
);
1457 LHSReg
= constrainOperandRegClass(II
, LHSReg
, II
.getNumDefs());
1458 RHSReg
= constrainOperandRegClass(II
, RHSReg
, II
.getNumDefs() + 1);
1459 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, II
, ResultReg
)
1462 .addImm(getArithExtendImm(ExtType
, ShiftImm
));
1466 bool AArch64FastISel::emitCmp(const Value
*LHS
, const Value
*RHS
, bool IsZExt
) {
1467 Type
*Ty
= LHS
->getType();
1468 EVT EVT
= TLI
.getValueType(DL
, Ty
, true);
1469 if (!EVT
.isSimple())
1471 MVT VT
= EVT
.getSimpleVT();
1473 switch (VT
.SimpleTy
) {
1481 return emitICmp(VT
, LHS
, RHS
, IsZExt
);
1484 return emitFCmp(VT
, LHS
, RHS
);
1488 bool AArch64FastISel::emitICmp(MVT RetVT
, const Value
*LHS
, const Value
*RHS
,
1490 return emitSub(RetVT
, LHS
, RHS
, /*SetFlags=*/true, /*WantResult=*/false,
1494 bool AArch64FastISel::emitICmp_ri(MVT RetVT
, unsigned LHSReg
, uint64_t Imm
) {
1495 return emitAddSub_ri(/*UseAdd=*/false, RetVT
, LHSReg
, Imm
,
1496 /*SetFlags=*/true, /*WantResult=*/false) != 0;
1499 bool AArch64FastISel::emitFCmp(MVT RetVT
, const Value
*LHS
, const Value
*RHS
) {
1500 if (RetVT
!= MVT::f32
&& RetVT
!= MVT::f64
)
1503 // Check to see if the 2nd operand is a constant that we can encode directly
1505 bool UseImm
= false;
1506 if (const auto *CFP
= dyn_cast
<ConstantFP
>(RHS
))
1507 if (CFP
->isZero() && !CFP
->isNegative())
1510 Register LHSReg
= getRegForValue(LHS
);
1515 unsigned Opc
= (RetVT
== MVT::f64
) ? AArch64::FCMPDri
: AArch64::FCMPSri
;
1516 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(Opc
))
1521 Register RHSReg
= getRegForValue(RHS
);
1525 unsigned Opc
= (RetVT
== MVT::f64
) ? AArch64::FCMPDrr
: AArch64::FCMPSrr
;
1526 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(Opc
))
1532 unsigned AArch64FastISel::emitAdd(MVT RetVT
, const Value
*LHS
, const Value
*RHS
,
1533 bool SetFlags
, bool WantResult
, bool IsZExt
) {
1534 return emitAddSub(/*UseAdd=*/true, RetVT
, LHS
, RHS
, SetFlags
, WantResult
,
1538 /// This method is a wrapper to simplify add emission.
1540 /// First try to emit an add with an immediate operand using emitAddSub_ri. If
1541 /// that fails, then try to materialize the immediate into a register and use
1542 /// emitAddSub_rr instead.
1543 unsigned AArch64FastISel::emitAdd_ri_(MVT VT
, unsigned Op0
, int64_t Imm
) {
1546 ResultReg
= emitAddSub_ri(false, VT
, Op0
, -Imm
);
1548 ResultReg
= emitAddSub_ri(true, VT
, Op0
, Imm
);
1553 unsigned CReg
= fastEmit_i(VT
, VT
, ISD::Constant
, Imm
);
1557 ResultReg
= emitAddSub_rr(true, VT
, Op0
, CReg
);
1561 unsigned AArch64FastISel::emitSub(MVT RetVT
, const Value
*LHS
, const Value
*RHS
,
1562 bool SetFlags
, bool WantResult
, bool IsZExt
) {
1563 return emitAddSub(/*UseAdd=*/false, RetVT
, LHS
, RHS
, SetFlags
, WantResult
,
1567 unsigned AArch64FastISel::emitSubs_rr(MVT RetVT
, unsigned LHSReg
,
1568 unsigned RHSReg
, bool WantResult
) {
1569 return emitAddSub_rr(/*UseAdd=*/false, RetVT
, LHSReg
, RHSReg
,
1570 /*SetFlags=*/true, WantResult
);
1573 unsigned AArch64FastISel::emitSubs_rs(MVT RetVT
, unsigned LHSReg
,
1575 AArch64_AM::ShiftExtendType ShiftType
,
1576 uint64_t ShiftImm
, bool WantResult
) {
1577 return emitAddSub_rs(/*UseAdd=*/false, RetVT
, LHSReg
, RHSReg
, ShiftType
,
1578 ShiftImm
, /*SetFlags=*/true, WantResult
);
1581 unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc
, MVT RetVT
,
1582 const Value
*LHS
, const Value
*RHS
) {
1583 // Canonicalize immediates to the RHS first.
1584 if (isa
<ConstantInt
>(LHS
) && !isa
<ConstantInt
>(RHS
))
1585 std::swap(LHS
, RHS
);
1587 // Canonicalize mul by power-of-2 to the RHS.
1588 if (LHS
->hasOneUse() && isValueAvailable(LHS
))
1589 if (isMulPowOf2(LHS
))
1590 std::swap(LHS
, RHS
);
1592 // Canonicalize shift immediate to the RHS.
1593 if (LHS
->hasOneUse() && isValueAvailable(LHS
))
1594 if (const auto *SI
= dyn_cast
<ShlOperator
>(LHS
))
1595 if (isa
<ConstantInt
>(SI
->getOperand(1)))
1596 std::swap(LHS
, RHS
);
1598 Register LHSReg
= getRegForValue(LHS
);
1602 unsigned ResultReg
= 0;
1603 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
)) {
1604 uint64_t Imm
= C
->getZExtValue();
1605 ResultReg
= emitLogicalOp_ri(ISDOpc
, RetVT
, LHSReg
, Imm
);
1610 // Check if the mul can be folded into the instruction.
1611 if (RHS
->hasOneUse() && isValueAvailable(RHS
)) {
1612 if (isMulPowOf2(RHS
)) {
1613 const Value
*MulLHS
= cast
<MulOperator
>(RHS
)->getOperand(0);
1614 const Value
*MulRHS
= cast
<MulOperator
>(RHS
)->getOperand(1);
1616 if (const auto *C
= dyn_cast
<ConstantInt
>(MulLHS
))
1617 if (C
->getValue().isPowerOf2())
1618 std::swap(MulLHS
, MulRHS
);
1620 assert(isa
<ConstantInt
>(MulRHS
) && "Expected a ConstantInt.");
1621 uint64_t ShiftVal
= cast
<ConstantInt
>(MulRHS
)->getValue().logBase2();
1623 Register RHSReg
= getRegForValue(MulLHS
);
1626 ResultReg
= emitLogicalOp_rs(ISDOpc
, RetVT
, LHSReg
, RHSReg
, ShiftVal
);
1632 // Check if the shift can be folded into the instruction.
1633 if (RHS
->hasOneUse() && isValueAvailable(RHS
)) {
1634 if (const auto *SI
= dyn_cast
<ShlOperator
>(RHS
))
1635 if (const auto *C
= dyn_cast
<ConstantInt
>(SI
->getOperand(1))) {
1636 uint64_t ShiftVal
= C
->getZExtValue();
1637 Register RHSReg
= getRegForValue(SI
->getOperand(0));
1640 ResultReg
= emitLogicalOp_rs(ISDOpc
, RetVT
, LHSReg
, RHSReg
, ShiftVal
);
1646 Register RHSReg
= getRegForValue(RHS
);
1650 MVT VT
= std::max(MVT::i32
, RetVT
.SimpleTy
);
1651 ResultReg
= fastEmit_rr(VT
, VT
, ISDOpc
, LHSReg
, RHSReg
);
1652 if (RetVT
>= MVT::i8
&& RetVT
<= MVT::i16
) {
1653 uint64_t Mask
= (RetVT
== MVT::i8
) ? 0xff : 0xffff;
1654 ResultReg
= emitAnd_ri(MVT::i32
, ResultReg
, Mask
);
1659 unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc
, MVT RetVT
,
1660 unsigned LHSReg
, uint64_t Imm
) {
1661 static_assert((ISD::AND
+ 1 == ISD::OR
) && (ISD::AND
+ 2 == ISD::XOR
),
1662 "ISD nodes are not consecutive!");
1663 static const unsigned OpcTable
[3][2] = {
1664 { AArch64::ANDWri
, AArch64::ANDXri
},
1665 { AArch64::ORRWri
, AArch64::ORRXri
},
1666 { AArch64::EORWri
, AArch64::EORXri
}
1668 const TargetRegisterClass
*RC
;
1671 switch (RetVT
.SimpleTy
) {
1678 unsigned Idx
= ISDOpc
- ISD::AND
;
1679 Opc
= OpcTable
[Idx
][0];
1680 RC
= &AArch64::GPR32spRegClass
;
1685 Opc
= OpcTable
[ISDOpc
- ISD::AND
][1];
1686 RC
= &AArch64::GPR64spRegClass
;
1691 if (!AArch64_AM::isLogicalImmediate(Imm
, RegSize
))
1694 Register ResultReg
=
1695 fastEmitInst_ri(Opc
, RC
, LHSReg
,
1696 AArch64_AM::encodeLogicalImmediate(Imm
, RegSize
));
1697 if (RetVT
>= MVT::i8
&& RetVT
<= MVT::i16
&& ISDOpc
!= ISD::AND
) {
1698 uint64_t Mask
= (RetVT
== MVT::i8
) ? 0xff : 0xffff;
1699 ResultReg
= emitAnd_ri(MVT::i32
, ResultReg
, Mask
);
1704 unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc
, MVT RetVT
,
1705 unsigned LHSReg
, unsigned RHSReg
,
1706 uint64_t ShiftImm
) {
1707 static_assert((ISD::AND
+ 1 == ISD::OR
) && (ISD::AND
+ 2 == ISD::XOR
),
1708 "ISD nodes are not consecutive!");
1709 static const unsigned OpcTable
[3][2] = {
1710 { AArch64::ANDWrs
, AArch64::ANDXrs
},
1711 { AArch64::ORRWrs
, AArch64::ORRXrs
},
1712 { AArch64::EORWrs
, AArch64::EORXrs
}
1715 // Don't deal with undefined shifts.
1716 if (ShiftImm
>= RetVT
.getSizeInBits())
1719 const TargetRegisterClass
*RC
;
1721 switch (RetVT
.SimpleTy
) {
1728 Opc
= OpcTable
[ISDOpc
- ISD::AND
][0];
1729 RC
= &AArch64::GPR32RegClass
;
1732 Opc
= OpcTable
[ISDOpc
- ISD::AND
][1];
1733 RC
= &AArch64::GPR64RegClass
;
1736 Register ResultReg
=
1737 fastEmitInst_rri(Opc
, RC
, LHSReg
, RHSReg
,
1738 AArch64_AM::getShifterImm(AArch64_AM::LSL
, ShiftImm
));
1739 if (RetVT
>= MVT::i8
&& RetVT
<= MVT::i16
) {
1740 uint64_t Mask
= (RetVT
== MVT::i8
) ? 0xff : 0xffff;
1741 ResultReg
= emitAnd_ri(MVT::i32
, ResultReg
, Mask
);
1746 unsigned AArch64FastISel::emitAnd_ri(MVT RetVT
, unsigned LHSReg
,
1748 return emitLogicalOp_ri(ISD::AND
, RetVT
, LHSReg
, Imm
);
1751 unsigned AArch64FastISel::emitLoad(MVT VT
, MVT RetVT
, Address Addr
,
1752 bool WantZExt
, MachineMemOperand
*MMO
) {
1753 if (!TLI
.allowsMisalignedMemoryAccesses(VT
))
1756 // Simplify this down to something we can handle.
1757 if (!simplifyAddress(Addr
, VT
))
1760 unsigned ScaleFactor
= getImplicitScaleFactor(VT
);
1762 llvm_unreachable("Unexpected value type.");
1764 // Negative offsets require unscaled, 9-bit, signed immediate offsets.
1765 // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
1766 bool UseScaled
= true;
1767 if ((Addr
.getOffset() < 0) || (Addr
.getOffset() & (ScaleFactor
- 1))) {
1772 static const unsigned GPOpcTable
[2][8][4] = {
1774 { { AArch64::LDURSBWi
, AArch64::LDURSHWi
, AArch64::LDURWi
,
1776 { AArch64::LDURSBXi
, AArch64::LDURSHXi
, AArch64::LDURSWi
,
1778 { AArch64::LDRSBWui
, AArch64::LDRSHWui
, AArch64::LDRWui
,
1780 { AArch64::LDRSBXui
, AArch64::LDRSHXui
, AArch64::LDRSWui
,
1782 { AArch64::LDRSBWroX
, AArch64::LDRSHWroX
, AArch64::LDRWroX
,
1784 { AArch64::LDRSBXroX
, AArch64::LDRSHXroX
, AArch64::LDRSWroX
,
1786 { AArch64::LDRSBWroW
, AArch64::LDRSHWroW
, AArch64::LDRWroW
,
1788 { AArch64::LDRSBXroW
, AArch64::LDRSHXroW
, AArch64::LDRSWroW
,
1792 { { AArch64::LDURBBi
, AArch64::LDURHHi
, AArch64::LDURWi
,
1794 { AArch64::LDURBBi
, AArch64::LDURHHi
, AArch64::LDURWi
,
1796 { AArch64::LDRBBui
, AArch64::LDRHHui
, AArch64::LDRWui
,
1798 { AArch64::LDRBBui
, AArch64::LDRHHui
, AArch64::LDRWui
,
1800 { AArch64::LDRBBroX
, AArch64::LDRHHroX
, AArch64::LDRWroX
,
1802 { AArch64::LDRBBroX
, AArch64::LDRHHroX
, AArch64::LDRWroX
,
1804 { AArch64::LDRBBroW
, AArch64::LDRHHroW
, AArch64::LDRWroW
,
1806 { AArch64::LDRBBroW
, AArch64::LDRHHroW
, AArch64::LDRWroW
,
1811 static const unsigned FPOpcTable
[4][2] = {
1812 { AArch64::LDURSi
, AArch64::LDURDi
},
1813 { AArch64::LDRSui
, AArch64::LDRDui
},
1814 { AArch64::LDRSroX
, AArch64::LDRDroX
},
1815 { AArch64::LDRSroW
, AArch64::LDRDroW
}
1819 const TargetRegisterClass
*RC
;
1820 bool UseRegOffset
= Addr
.isRegBase() && !Addr
.getOffset() && Addr
.getReg() &&
1821 Addr
.getOffsetReg();
1822 unsigned Idx
= UseRegOffset
? 2 : UseScaled
? 1 : 0;
1823 if (Addr
.getExtendType() == AArch64_AM::UXTW
||
1824 Addr
.getExtendType() == AArch64_AM::SXTW
)
1827 bool IsRet64Bit
= RetVT
== MVT::i64
;
1828 switch (VT
.SimpleTy
) {
1830 llvm_unreachable("Unexpected value type.");
1831 case MVT::i1
: // Intentional fall-through.
1833 Opc
= GPOpcTable
[WantZExt
][2 * Idx
+ IsRet64Bit
][0];
1834 RC
= (IsRet64Bit
&& !WantZExt
) ?
1835 &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1838 Opc
= GPOpcTable
[WantZExt
][2 * Idx
+ IsRet64Bit
][1];
1839 RC
= (IsRet64Bit
&& !WantZExt
) ?
1840 &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1843 Opc
= GPOpcTable
[WantZExt
][2 * Idx
+ IsRet64Bit
][2];
1844 RC
= (IsRet64Bit
&& !WantZExt
) ?
1845 &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass
;
1848 Opc
= GPOpcTable
[WantZExt
][2 * Idx
+ IsRet64Bit
][3];
1849 RC
= &AArch64::GPR64RegClass
;
1852 Opc
= FPOpcTable
[Idx
][0];
1853 RC
= &AArch64::FPR32RegClass
;
1856 Opc
= FPOpcTable
[Idx
][1];
1857 RC
= &AArch64::FPR64RegClass
;
  // Create the base instruction, then add the operands.
  Register ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                    TII.get(Opc), ResultReg);
  addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, ScaleFactor, MMO);

  // Loading an i1 requires special handling.
  if (VT == MVT::i1) {
    unsigned ANDReg = emitAnd_ri(MVT::i32, ResultReg, 1);
    assert(ANDReg && "Unexpected AND instruction emission failure.");
    ResultReg = ANDReg;
  }

  // For zero-extending loads to 64bit we emit a 32bit load and then convert
  // the 32bit reg to a 64bit reg.
  if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) {
    Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), Reg64)
        .addImm(0)
        .addReg(ResultReg, getKillRegState(true))
        .addImm(AArch64::sub_32);
    ResultReg = Reg64;
  }
  return ResultReg;
}
bool AArch64FastISel::selectAddSub(const Instruction *I) {
  MVT VT;
  if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
    return false;

  if (VT.isVector())
    return selectOperator(I, I->getOpcode());

  unsigned ResultReg;
  switch (I->getOpcode()) {
  default:
    llvm_unreachable("Unexpected instruction.");
  case Instruction::Add:
    ResultReg = emitAdd(VT, I->getOperand(0), I->getOperand(1));
    break;
  case Instruction::Sub:
    ResultReg = emitSub(VT, I->getOperand(0), I->getOperand(1));
    break;
  }
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}
bool AArch64FastISel::selectLogicalOp(const Instruction *I) {
  MVT VT;
  if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
    return false;

  if (VT.isVector())
    return selectOperator(I, I->getOpcode());

  unsigned ResultReg;
  switch (I->getOpcode()) {
  default:
    llvm_unreachable("Unexpected instruction.");
  case Instruction::And:
    ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
    break;
  case Instruction::Or:
    ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
    break;
  case Instruction::Xor:
    ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
    break;
  }
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}
bool AArch64FastISel::selectLoad(const Instruction *I) {
  MVT VT;
  // Verify we have a legal type before going any further. Currently, we handle
  // simple types that will directly fit in a register (i32/f32/i64/f64) or
  // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
  if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true) ||
      cast<LoadInst>(I)->isAtomic())
    return false;

  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // See if we can handle this address.
  Address Addr;
  if (!computeAddress(I->getOperand(0), Addr, I->getType()))
    return false;

  // Fold the following sign-/zero-extend into the load instruction.
  bool WantZExt = true;
  MVT RetVT = VT;
  const Value *IntExtVal = nullptr;
  if (I->hasOneUse()) {
    if (const auto *ZE = dyn_cast<ZExtInst>(I->use_begin()->getUser())) {
      if (isTypeSupported(ZE->getType(), RetVT))
        IntExtVal = ZE;
      else
        RetVT = VT;
    } else if (const auto *SE = dyn_cast<SExtInst>(I->use_begin()->getUser())) {
      if (isTypeSupported(SE->getType(), RetVT))
        IntExtVal = SE;
      else
        RetVT = VT;
      WantZExt = false;
    }
  }

  unsigned ResultReg =
      emitLoad(VT, RetVT, Addr, WantZExt, createMachineMemOperandFor(I));
  if (!ResultReg)
    return false;

  // There are a few different cases we have to handle, because the load or the
  // sign-/zero-extend might not be selected by FastISel if we fall-back to
  // SelectionDAG. There is also an ordering issue when both instructions are in
  // different basic blocks.
  // 1.) The load instruction is selected by FastISel, but the integer extend
  //     not. This usually happens when the integer extend is in a different
  //     basic block and SelectionDAG took over for that basic block.
  // 2.) The load instruction is selected before the integer extend. This only
  //     happens when the integer extend is in a different basic block.
  // 3.) The load instruction is selected by SelectionDAG and the integer extend
  //     by FastISel. This happens if there are instructions between the load
  //     and the integer extend that couldn't be selected by FastISel.
  if (IntExtVal) {
    // The integer extend hasn't been emitted yet. FastISel or SelectionDAG
    // could select it. Emit a copy to subreg if necessary. FastISel will remove
    // it when it selects the integer extend.
    Register Reg = lookUpRegForValue(IntExtVal);
    auto *MI = MRI.getUniqueVRegDef(Reg);
    if (!MI) {
      if (RetVT == MVT::i64 && VT <= MVT::i32) {
        if (WantZExt) {
          // Delete the last emitted instruction from emitLoad (SUBREG_TO_REG).
          MachineBasicBlock::iterator I(std::prev(FuncInfo.InsertPt));
          ResultReg = std::prev(I)->getOperand(0).getReg();
          removeDeadCode(I, std::next(I));
        } else
          ResultReg = fastEmitInst_extractsubreg(MVT::i32, ResultReg,
                                                 AArch64::sub_32);
      }
      updateValueMap(I, ResultReg);
      return true;
    }

    // The integer extend has already been emitted - delete all the instructions
    // that have been emitted by the integer extend lowering code and use the
    // result from the load instruction directly.
    while (MI) {
      Reg = 0;
      for (auto &Opnd : MI->uses()) {
        if (Opnd.isReg()) {
          Reg = Opnd.getReg();
          break;
        }
      }
      MachineBasicBlock::iterator I(MI);
      removeDeadCode(I, std::next(I));
      MI = nullptr;
      if (Reg)
        MI = MRI.getUniqueVRegDef(Reg);
    }
    updateValueMap(IntExtVal, ResultReg);
    return true;
  }

  updateValueMap(I, ResultReg);
  return true;
}
bool AArch64FastISel::emitStoreRelease(MVT VT, unsigned SrcReg,
                                       unsigned AddrReg,
                                       MachineMemOperand *MMO) {
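  // STLRB/STLRH/STLRW/STLRX are store-release instructions, so the release
  // (or seq_cst) ordering requested by the caller needs no separate barrier.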
  unsigned Opc;
  switch (VT.SimpleTy) {
  default: return false;
  case MVT::i8: Opc = AArch64::STLRB; break;
  case MVT::i16: Opc = AArch64::STLRH; break;
  case MVT::i32: Opc = AArch64::STLRW; break;
  case MVT::i64: Opc = AArch64::STLRX; break;
  }

  const MCInstrDesc &II = TII.get(Opc);
  SrcReg = constrainOperandRegClass(II, SrcReg, 0);
  AddrReg = constrainOperandRegClass(II, AddrReg, 1);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
      .addReg(SrcReg)
      .addReg(AddrReg)
      .addMemOperand(MMO);
  return true;
}
bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
                                MachineMemOperand *MMO) {
  if (!TLI.allowsMisalignedMemoryAccesses(VT))
    return false;

  // Simplify this down to something we can handle.
  if (!simplifyAddress(Addr, VT))
    return false;

  unsigned ScaleFactor = getImplicitScaleFactor(VT);
  if (!ScaleFactor)
    llvm_unreachable("Unexpected value type.");

  // Negative offsets require unscaled, 9-bit, signed immediate offsets.
  // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets.
  bool UseScaled = true;
  if ((Addr.getOffset() < 0) || (Addr.getOffset() & (ScaleFactor - 1))) {
    UseScaled = false;
    ScaleFactor = 1;
  }

  static const unsigned OpcTable[4][6] = {
    { AArch64::STURBBi,  AArch64::STURHHi,  AArch64::STURWi,  AArch64::STURXi,
      AArch64::STURSi,   AArch64::STURDi },
    { AArch64::STRBBui,  AArch64::STRHHui,  AArch64::STRWui,  AArch64::STRXui,
      AArch64::STRSui,   AArch64::STRDui },
    { AArch64::STRBBroX, AArch64::STRHHroX, AArch64::STRWroX, AArch64::STRXroX,
      AArch64::STRSroX,  AArch64::STRDroX },
    { AArch64::STRBBroW, AArch64::STRHHroW, AArch64::STRWroW, AArch64::STRXroW,
      AArch64::STRSroW,  AArch64::STRDroW }
  };

  unsigned Opc;
  bool VTIsi1 = false;
  bool UseRegOffset = Addr.isRegBase() && !Addr.getOffset() && Addr.getReg() &&
                      Addr.getOffsetReg();
  unsigned Idx = UseRegOffset ? 2 : UseScaled ? 1 : 0;
  if (Addr.getExtendType() == AArch64_AM::UXTW ||
      Addr.getExtendType() == AArch64_AM::SXTW)
    Idx++;

  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected value type.");
  case MVT::i1: VTIsi1 = true; [[fallthrough]];
  case MVT::i8: Opc = OpcTable[Idx][0]; break;
  case MVT::i16: Opc = OpcTable[Idx][1]; break;
  case MVT::i32: Opc = OpcTable[Idx][2]; break;
  case MVT::i64: Opc = OpcTable[Idx][3]; break;
  case MVT::f32: Opc = OpcTable[Idx][4]; break;
  case MVT::f64: Opc = OpcTable[Idx][5]; break;
  }

  // Storing an i1 requires special handling.
  if (VTIsi1 && SrcReg != AArch64::WZR) {
    unsigned ANDReg = emitAnd_ri(MVT::i32, SrcReg, 1);
    assert(ANDReg && "Unexpected AND instruction emission failure.");
    SrcReg = ANDReg;
  }
  // Create the base instruction, then add the operands.
  const MCInstrDesc &II = TII.get(Opc);
  SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addReg(SrcReg);
  addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, ScaleFactor, MMO);

  return true;
}
bool AArch64FastISel::selectStore(const Instruction *I) {
  MVT VT;
  const Value *Op0 = I->getOperand(0);
  // Verify we have a legal type before going any further. Currently, we handle
  // simple types that will directly fit in a register (i32/f32/i64/f64) or
  // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
  if (!isTypeSupported(Op0->getType(), VT, /*IsVectorAllowed=*/true))
    return false;

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Get the value to be stored into a register. Use the zero register directly
  // when possible to avoid an unnecessary copy and a wasted register.
  unsigned SrcReg = 0;
  if (const auto *CI = dyn_cast<ConstantInt>(Op0)) {
    if (CI->isZero())
      SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
  } else if (const auto *CF = dyn_cast<ConstantFP>(Op0)) {
    if (CF->isZero() && !CF->isNegative()) {
      VT = MVT::getIntegerVT(VT.getSizeInBits());
      SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
    }
  }

  if (!SrcReg)
    SrcReg = getRegForValue(Op0);

  if (!SrcReg)
    return false;

  auto *SI = cast<StoreInst>(I);

  // Try to emit a STLR for seq_cst/release.
  if (SI->isAtomic()) {
    AtomicOrdering Ord = SI->getOrdering();
    // The non-atomic instructions are sufficient for relaxed stores.
    if (isReleaseOrStronger(Ord)) {
      // The STLR addressing mode only supports a base reg; pass that directly.
      Register AddrReg = getRegForValue(PtrV);
      return emitStoreRelease(VT, SrcReg, AddrReg,
                              createMachineMemOperandFor(I));
    }
  }

  // See if we can handle this address.
  Address Addr;
  if (!computeAddress(PtrV, Addr, Op0->getType()))
    return false;

  if (!emitStore(VT, SrcReg, Addr, createMachineMemOperandFor(I)))
    return false;
  return true;
}
static AArch64CC::CondCode getCompareCC(CmpInst::Predicate Pred) {
  switch (Pred) {
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
  default:
    // AL is our "false" for now. The other two need more compares.
    return AArch64CC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return AArch64CC::HI;
  case CmpInst::FCMP_OLT:
    return AArch64CC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return AArch64CC::LS;
  case CmpInst::FCMP_ORD:
    return AArch64CC::VC;
  case CmpInst::FCMP_UNO:
    return AArch64CC::VS;
  case CmpInst::FCMP_UGE:
    return AArch64CC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return AArch64CC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  }
}
/// Try to emit a combined compare-and-branch instruction.
bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
  // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
  // will not be produced, as they are conditional branch instructions that do
  // not set flags.
  if (FuncInfo.MF->getFunction().hasFnAttribute(
          Attribute::SpeculativeLoadHardening))
    return false;

  assert(isa<CmpInst>(BI->getCondition()) && "Expected cmp instruction");
  const CmpInst *CI = cast<CmpInst>(BI->getCondition());
  CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);

  const Value *LHS = CI->getOperand(0);
  const Value *RHS = CI->getOperand(1);

  MVT VT;
  if (!isTypeSupported(LHS->getType(), VT))
    return false;

  unsigned BW = VT.getSizeInBits();
  if (BW > 64)
    return false;

  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Try to take advantage of fallthrough opportunities.
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    Predicate = CmpInst::getInversePredicate(Predicate);
  }

  int TestBit = -1;
  bool IsCmpNE;
  switch (Predicate) {
  default:
    return false;
  case CmpInst::ICMP_EQ:
  case CmpInst::ICMP_NE:
    if (isa<Constant>(LHS) && cast<Constant>(LHS)->isNullValue())
      std::swap(LHS, RHS);

    if (!isa<Constant>(RHS) || !cast<Constant>(RHS)->isNullValue())
      return false;

    if (const auto *AI = dyn_cast<BinaryOperator>(LHS))
      if (AI->getOpcode() == Instruction::And && isValueAvailable(AI)) {
        const Value *AndLHS = AI->getOperand(0);
        const Value *AndRHS = AI->getOperand(1);

        if (const auto *C = dyn_cast<ConstantInt>(AndLHS))
          if (C->getValue().isPowerOf2())
            std::swap(AndLHS, AndRHS);

        if (const auto *C = dyn_cast<ConstantInt>(AndRHS))
          if (C->getValue().isPowerOf2()) {
            TestBit = C->getValue().logBase2();
            LHS = AndLHS;
          }
      }

    if (VT == MVT::i1)
      TestBit = 0;

    IsCmpNE = Predicate == CmpInst::ICMP_NE;
    break;
  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SGE:
    if (!isa<Constant>(RHS) || !cast<Constant>(RHS)->isNullValue())
      return false;

    TestBit = BW - 1;
    IsCmpNE = Predicate == CmpInst::ICMP_SLT;
    break;
  case CmpInst::ICMP_SGT:
  case CmpInst::ICMP_SLE:
    if (!isa<ConstantInt>(RHS))
      return false;

    if (cast<ConstantInt>(RHS)->getValue() != APInt(BW, -1, true))
      return false;

    TestBit = BW - 1;
    IsCmpNE = Predicate == CmpInst::ICMP_SLE;
    break;
  }

  static const unsigned OpcTable[2][2][2] = {
    { {AArch64::CBZW,  AArch64::CBZX },
      {AArch64::CBNZW, AArch64::CBNZX} },
    { {AArch64::TBZW,  AArch64::TBZX },
      {AArch64::TBNZW, AArch64::TBNZX} }
  };
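  // OpcTable is indexed by [IsBitTest][IsCmpNE][Is64Bit]: compare-and-branch
  // (CBZ/CBNZ) vs. test-bit-and-branch (TBZ/TBNZ), branch on zero/clear vs.
  // non-zero/set, and the W vs. X register form.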
  bool IsBitTest = TestBit != -1;
  bool Is64Bit = BW == 64;
  if (TestBit < 32 && TestBit >= 0)
    Is64Bit = false;

  unsigned Opc = OpcTable[IsBitTest][IsCmpNE][Is64Bit];
  const MCInstrDesc &II = TII.get(Opc);

  Register SrcReg = getRegForValue(LHS);
  if (!SrcReg)
    return false;

  if (BW == 64 && !Is64Bit)
    SrcReg = fastEmitInst_extractsubreg(MVT::i32, SrcReg, AArch64::sub_32);

  if ((BW < 32) && !IsBitTest)
    SrcReg = emitIntExt(VT, SrcReg, MVT::i32, /*isZExt=*/true);

  // Emit the combined compare and branch instruction.
  SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
          .addReg(SrcReg);
  if (IsBitTest)
    MIB.addImm(TestBit);
  MIB.addMBB(TBB);

  finishCondBranch(BI->getParent(), TBB, FBB);
  return true;
}
bool AArch64FastISel::selectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  if (BI->isUnconditional()) {
    MachineBasicBlock *MSucc = FuncInfo.MBBMap[BI->getSuccessor(0)];
    fastEmitBranch(MSucc, BI->getDebugLoc());
    return true;
  }

  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && isValueAvailable(CI)) {
      // Try to optimize or fold the cmp.
      CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
      switch (Predicate) {
      default:
        break;
      case CmpInst::FCMP_FALSE:
        fastEmitBranch(FBB, MIMD.getDL());
        return true;
      case CmpInst::FCMP_TRUE:
        fastEmitBranch(TBB, MIMD.getDL());
        return true;
      }

      // Try to emit a combined compare-and-branch first.
      if (emitCompareAndBranch(BI))
        return true;

      // Try to take advantage of fallthrough opportunities.
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      // Emit the cmp.
      if (!emitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      // FCMP_UEQ and FCMP_ONE cannot be checked with a single branch
      // instruction.
      AArch64CC::CondCode CC = getCompareCC(Predicate);
      AArch64CC::CondCode ExtraCC = AArch64CC::AL;
      switch (Predicate) {
      default:
        break;
      case CmpInst::FCMP_UEQ:
        ExtraCC = AArch64CC::EQ;
        CC = AArch64CC::VS;
        break;
      case CmpInst::FCMP_ONE:
        ExtraCC = AArch64CC::MI;
        CC = AArch64CC::GT;
        break;
      }
      assert((CC != AArch64CC::AL) && "Unexpected condition code.");

      // Emit the extra branch for FCMP_UEQ and FCMP_ONE.
      if (ExtraCC != AArch64CC::AL) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))
            .addImm(ExtraCC)
            .addMBB(TBB);
      }

      // Emit the branch.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))
          .addImm(CC)
          .addMBB(TBB);

      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (const auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::B))
        .addMBB(Target);

    // Obtain the branch probability and add the target to the successor list.
    if (FuncInfo.BPI) {
      auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
          BI->getParent(), Target->getBasicBlock());
      FuncInfo.MBB->addSuccessor(Target, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(Target);
    return true;
  } else {
    AArch64CC::CondCode CC = AArch64CC::NE;
    if (foldXALUIntrinsic(CC, I, BI->getCondition())) {
      // Fake request the condition, otherwise the intrinsic might be completely
      // optimized away.
      Register CondReg = getRegForValue(BI->getCondition());
      if (!CondReg)
        return false;

      // Emit the branch.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))
          .addImm(CC)
          .addMBB(TBB);

      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  }

  Register CondReg = getRegForValue(BI->getCondition());
  if (!CondReg)
    return false;

  // i1 conditions come as i32 values, test the lowest bit with tb(n)z.
  unsigned Opcode = AArch64::TBNZW;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    Opcode = AArch64::TBZW;
  }

  const MCInstrDesc &II = TII.get(Opcode);
  Register ConstrainedCondReg
    = constrainOperandRegClass(II, CondReg, II.getNumDefs());
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
      .addReg(ConstrainedCondReg)
      .addImm(0)
      .addMBB(TBB);

  finishCondBranch(BI->getParent(), TBB, FBB);
  return true;
}
bool AArch64FastISel::selectIndirectBr(const Instruction *I) {
  const IndirectBrInst *BI = cast<IndirectBrInst>(I);
  Register AddrReg = getRegForValue(BI->getOperand(0));
  if (!AddrReg)
    return false;

  // Emit the indirect branch.
  const MCInstrDesc &II = TII.get(AArch64::BR);
  AddrReg = constrainOperandRegClass(II, AddrReg, II.getNumDefs());
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addReg(AddrReg);

  // Make sure the CFG is up-to-date.
  for (const auto *Succ : BI->successors())
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[Succ]);

  return true;
}
bool AArch64FastISel::selectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Vectors of i1 are weird: bail out.
  if (CI->getType()->isVectorTy())
    return false;

  // Try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
  unsigned ResultReg = 0;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_FALSE:
    ResultReg = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(AArch64::WZR, getKillRegState(true));
    break;
  case CmpInst::FCMP_TRUE:
    ResultReg = fastEmit_i(MVT::i32, MVT::i32, ISD::Constant, 1);
    break;
  }

  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Emit the cmp.
  if (!emitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  ResultReg = createResultReg(&AArch64::GPR32RegClass);

  // FCMP_UEQ and FCMP_ONE cannot be checked with a single instruction. These
  // condition codes are inverted, because they are used by CSINC.
  static unsigned CondCodeTable[2][2] = {
    { AArch64CC::NE, AArch64CC::VC },
    { AArch64CC::PL, AArch64CC::LE }
  };
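  // Each CSINC below produces 1 exactly when its (inverted) condition fails, so
  // chaining two CSINCs ORs the two underlying checks together (e.g. UEQ becomes
  // "equal || unordered").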
  unsigned *CondCodes = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_UEQ:
    CondCodes = &CondCodeTable[0][0];
    break;
  case CmpInst::FCMP_ONE:
    CondCodes = &CondCodeTable[1][0];
    break;
  }

  if (CondCodes) {
    Register TmpReg1 = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),
            TmpReg1)
        .addReg(AArch64::WZR, getKillRegState(true))
        .addReg(AArch64::WZR, getKillRegState(true))
        .addImm(CondCodes[0]);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),
            ResultReg)
        .addReg(TmpReg1, getKillRegState(true))
        .addReg(AArch64::WZR, getKillRegState(true))
        .addImm(CondCodes[1]);

    updateValueMap(I, ResultReg);
    return true;
  }

  // Now set a register based on the comparison.
  AArch64CC::CondCode CC = getCompareCC(Predicate);
  assert((CC != AArch64CC::AL) && "Unexpected condition code.");
  AArch64CC::CondCode invertedCC = getInvertedCondCode(CC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),
          ResultReg)
      .addReg(AArch64::WZR, getKillRegState(true))
      .addReg(AArch64::WZR, getKillRegState(true))
      .addImm(invertedCC);

  updateValueMap(I, ResultReg);
  return true;
}
/// Optimize selects of i1 if one of the operands has a 'true' or 'false'
/// constant.
bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {
  if (!SI->getType()->isIntegerTy(1))
    return false;

  const Value *Src1Val, *Src2Val;
  unsigned Opc = 0;
  bool NeedExtraOp = false;
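  // Four patterns are handled below:
  //   select c, 1, f  ->  orr c, f
  //   select c, 0, f  ->  bic f, c
  //   select c, t, 1  ->  orr (c ^ 1), t
  //   select c, t, 0  ->  and c, t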
  if (auto *CI = dyn_cast<ConstantInt>(SI->getTrueValue())) {
    if (CI->isOne()) {
      Src1Val = SI->getCondition();
      Src2Val = SI->getFalseValue();
      Opc = AArch64::ORRWrr;
    } else {
      assert(CI->isZero());
      Src1Val = SI->getFalseValue();
      Src2Val = SI->getCondition();
      Opc = AArch64::BICWrr;
    }
  } else if (auto *CI = dyn_cast<ConstantInt>(SI->getFalseValue())) {
    if (CI->isOne()) {
      Src1Val = SI->getCondition();
      Src2Val = SI->getTrueValue();
      Opc = AArch64::ORRWrr;
      NeedExtraOp = true;
    } else {
      assert(CI->isZero());
      Src1Val = SI->getCondition();
      Src2Val = SI->getTrueValue();
      Opc = AArch64::ANDWrr;
    }
  } else
    return false;

  Register Src1Reg = getRegForValue(Src1Val);
  if (!Src1Reg)
    return false;

  Register Src2Reg = getRegForValue(Src2Val);
  if (!Src2Reg)
    return false;

  if (NeedExtraOp)
    Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, 1);

  Register ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, Src1Reg,
                                       Src2Reg);
  updateValueMap(SI, ResultReg);
  return true;
}
bool AArch64FastISel::selectSelect(const Instruction *I) {
  assert(isa<SelectInst>(I) && "Expected a select instruction.");
  MVT VT;
  if (!isTypeSupported(I->getType(), VT))
    return false;

  unsigned Opc;
  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
  default:
    return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    Opc = AArch64::CSELWr;
    RC = &AArch64::GPR32RegClass;
    break;
  case MVT::i64:
    Opc = AArch64::CSELXr;
    RC = &AArch64::GPR64RegClass;
    break;
  case MVT::f32:
    Opc = AArch64::FCSELSrrr;
    RC = &AArch64::FPR32RegClass;
    break;
  case MVT::f64:
    Opc = AArch64::FCSELDrrr;
    RC = &AArch64::FPR64RegClass;
    break;
  }

  const SelectInst *SI = cast<SelectInst>(I);
  const Value *Cond = SI->getCondition();
  AArch64CC::CondCode CC = AArch64CC::NE;
  AArch64CC::CondCode ExtraCC = AArch64CC::AL;

  if (optimizeSelect(SI))
    return true;

  // Try to pickup the flags, so we don't have to emit another compare.
  if (foldXALUIntrinsic(CC, I, Cond)) {
    // Fake request the condition to force emission of the XALU intrinsic.
    Register CondReg = getRegForValue(Cond);
    if (!CondReg)
      return false;
  } else if (isa<CmpInst>(Cond) && cast<CmpInst>(Cond)->hasOneUse() &&
             isValueAvailable(Cond)) {
    const auto *Cmp = cast<CmpInst>(Cond);
    // Try to optimize or fold the cmp.
    CmpInst::Predicate Predicate = optimizeCmpPredicate(Cmp);
    const Value *FoldSelect = nullptr;
    switch (Predicate) {
    default:
      break;
    case CmpInst::FCMP_FALSE:
      FoldSelect = SI->getFalseValue();
      break;
    case CmpInst::FCMP_TRUE:
      FoldSelect = SI->getTrueValue();
      break;
    }

    if (FoldSelect) {
      Register SrcReg = getRegForValue(FoldSelect);
      if (!SrcReg)
        return false;

      updateValueMap(I, SrcReg);
      return true;
    }

    // Emit the cmp.
    if (!emitCmp(Cmp->getOperand(0), Cmp->getOperand(1), Cmp->isUnsigned()))
      return false;

    // FCMP_UEQ and FCMP_ONE cannot be checked with a single select instruction.
    CC = getCompareCC(Predicate);
    switch (Predicate) {
    default:
      break;
    case CmpInst::FCMP_UEQ:
      ExtraCC = AArch64CC::EQ;
      CC = AArch64CC::VS;
      break;
    case CmpInst::FCMP_ONE:
      ExtraCC = AArch64CC::MI;
      CC = AArch64CC::GT;
      break;
    }
    assert((CC != AArch64CC::AL) && "Unexpected condition code.");
  } else {
    Register CondReg = getRegForValue(Cond);
    if (!CondReg)
      return false;

    const MCInstrDesc &II = TII.get(AArch64::ANDSWri);
    CondReg = constrainOperandRegClass(II, CondReg, 1);

    // Emit a TST instruction (ANDS wzr, reg, #imm).
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
            AArch64::WZR)
        .addReg(CondReg)
        .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
  }

  Register Src1Reg = getRegForValue(SI->getTrueValue());
  Register Src2Reg = getRegForValue(SI->getFalseValue());

  if (!Src1Reg || !Src2Reg)
    return false;

  if (ExtraCC != AArch64CC::AL)
    Src2Reg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, ExtraCC);

  Register ResultReg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, CC);
  updateValueMap(I, ResultReg);
  return true;
}
bool AArch64FastISel::selectFPExt(const Instruction *I) {
  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy())
    return false;

  Register Op = getRegForValue(V);
  if (!Op)
    return false;

  Register ResultReg = createResultReg(&AArch64::FPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::FCVTDSr),
          ResultReg).addReg(Op);
  updateValueMap(I, ResultReg);
  return true;
}
bool AArch64FastISel::selectFPTrunc(const Instruction *I) {
  Value *V = I->getOperand(0);
  if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy())
    return false;

  Register Op = getRegForValue(V);
  if (!Op)
    return false;

  Register ResultReg = createResultReg(&AArch64::FPR32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::FCVTSDr),
          ResultReg).addReg(Op);
  updateValueMap(I, ResultReg);
  return true;
}
// FPToUI and FPToSI
bool AArch64FastISel::selectFPToInt(const Instruction *I, bool Signed) {
  MVT DestVT;
  if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
    return false;

  Register SrcReg = getRegForValue(I->getOperand(0));
  if (!SrcReg)
    return false;

  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);
  if (SrcVT == MVT::f128 || SrcVT == MVT::f16 || SrcVT == MVT::bf16)
    return false;

  unsigned Opc;
  if (SrcVT == MVT::f64) {
    if (Signed)
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWDr : AArch64::FCVTZSUXDr;
    else
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWDr : AArch64::FCVTZUUXDr;
  } else {
    if (Signed)
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWSr : AArch64::FCVTZSUXSr;
    else
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr;
  }
  Register ResultReg = createResultReg(
      DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
      .addReg(SrcReg);
  updateValueMap(I, ResultReg);
  return true;
}
bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
  MVT DestVT;
  if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
    return false;
  // Let regular ISEL handle FP16
  if (DestVT == MVT::f16 || DestVT == MVT::bf16)
    return false;

  assert((DestVT == MVT::f32 || DestVT == MVT::f64) &&
         "Unexpected value type.");

  Register SrcReg = getRegForValue(I->getOperand(0));
  if (!SrcReg)
    return false;

  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);

  // Handle sign-extension.
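  // The SCVTF/UCVTF forms used below only take W or X source registers, so
  // i1/i8/i16 sources are widened to i32 first, sign- or zero-extended to match
  // the signedness of the requested conversion.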
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) {
    SrcReg =
        emitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed);
    if (!SrcReg)
      return false;
  }

  unsigned Opc;
  if (SrcVT == MVT::i64) {
    if (Signed)
      Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUXSri : AArch64::SCVTFUXDri;
    else
      Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUXSri : AArch64::UCVTFUXDri;
  } else {
    if (Signed)
      Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUWSri : AArch64::SCVTFUWDri;
    else
      Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
  }

  Register ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg);
  updateValueMap(I, ResultReg);
  return true;
}
2897 bool AArch64FastISel::fastLowerArguments() {
2898 if (!FuncInfo
.CanLowerReturn
)
2901 const Function
*F
= FuncInfo
.Fn
;
2905 CallingConv::ID CC
= F
->getCallingConv();
2906 if (CC
!= CallingConv::C
&& CC
!= CallingConv::Swift
)
2909 if (Subtarget
->hasCustomCallingConv())
2912 // Only handle simple cases of up to 8 GPR and FPR each.
2913 unsigned GPRCnt
= 0;
2914 unsigned FPRCnt
= 0;
2915 for (auto const &Arg
: F
->args()) {
2916 if (Arg
.hasAttribute(Attribute::ByVal
) ||
2917 Arg
.hasAttribute(Attribute::InReg
) ||
2918 Arg
.hasAttribute(Attribute::StructRet
) ||
2919 Arg
.hasAttribute(Attribute::SwiftSelf
) ||
2920 Arg
.hasAttribute(Attribute::SwiftAsync
) ||
2921 Arg
.hasAttribute(Attribute::SwiftError
) ||
2922 Arg
.hasAttribute(Attribute::Nest
))
2925 Type
*ArgTy
= Arg
.getType();
2926 if (ArgTy
->isStructTy() || ArgTy
->isArrayTy())
2929 EVT ArgVT
= TLI
.getValueType(DL
, ArgTy
);
2930 if (!ArgVT
.isSimple())
2933 MVT VT
= ArgVT
.getSimpleVT().SimpleTy
;
2934 if (VT
.isFloatingPoint() && !Subtarget
->hasFPARMv8())
2937 if (VT
.isVector() &&
2938 (!Subtarget
->hasNEON() || !Subtarget
->isLittleEndian()))
2941 if (VT
>= MVT::i1
&& VT
<= MVT::i64
)
2943 else if ((VT
>= MVT::f16
&& VT
<= MVT::f64
) || VT
.is64BitVector() ||
2944 VT
.is128BitVector())
2949 if (GPRCnt
> 8 || FPRCnt
> 8)
2953 static const MCPhysReg Registers
[6][8] = {
2954 { AArch64::W0
, AArch64::W1
, AArch64::W2
, AArch64::W3
, AArch64::W4
,
2955 AArch64::W5
, AArch64::W6
, AArch64::W7
},
2956 { AArch64::X0
, AArch64::X1
, AArch64::X2
, AArch64::X3
, AArch64::X4
,
2957 AArch64::X5
, AArch64::X6
, AArch64::X7
},
2958 { AArch64::H0
, AArch64::H1
, AArch64::H2
, AArch64::H3
, AArch64::H4
,
2959 AArch64::H5
, AArch64::H6
, AArch64::H7
},
2960 { AArch64::S0
, AArch64::S1
, AArch64::S2
, AArch64::S3
, AArch64::S4
,
2961 AArch64::S5
, AArch64::S6
, AArch64::S7
},
2962 { AArch64::D0
, AArch64::D1
, AArch64::D2
, AArch64::D3
, AArch64::D4
,
2963 AArch64::D5
, AArch64::D6
, AArch64::D7
},
2964 { AArch64::Q0
, AArch64::Q1
, AArch64::Q2
, AArch64::Q3
, AArch64::Q4
,
2965 AArch64::Q5
, AArch64::Q6
, AArch64::Q7
}
2968 unsigned GPRIdx
= 0;
2969 unsigned FPRIdx
= 0;
2970 for (auto const &Arg
: F
->args()) {
2971 MVT VT
= TLI
.getSimpleValueType(DL
, Arg
.getType());
2973 const TargetRegisterClass
*RC
;
2974 if (VT
>= MVT::i1
&& VT
<= MVT::i32
) {
2975 SrcReg
= Registers
[0][GPRIdx
++];
2976 RC
= &AArch64::GPR32RegClass
;
2978 } else if (VT
== MVT::i64
) {
2979 SrcReg
= Registers
[1][GPRIdx
++];
2980 RC
= &AArch64::GPR64RegClass
;
2981 } else if (VT
== MVT::f16
|| VT
== MVT::bf16
) {
2982 SrcReg
= Registers
[2][FPRIdx
++];
2983 RC
= &AArch64::FPR16RegClass
;
2984 } else if (VT
== MVT::f32
) {
2985 SrcReg
= Registers
[3][FPRIdx
++];
2986 RC
= &AArch64::FPR32RegClass
;
2987 } else if ((VT
== MVT::f64
) || VT
.is64BitVector()) {
2988 SrcReg
= Registers
[4][FPRIdx
++];
2989 RC
= &AArch64::FPR64RegClass
;
2990 } else if (VT
.is128BitVector()) {
2991 SrcReg
= Registers
[5][FPRIdx
++];
2992 RC
= &AArch64::FPR128RegClass
;
2994 llvm_unreachable("Unexpected value type.");
2996 Register DstReg
= FuncInfo
.MF
->addLiveIn(SrcReg
, RC
);
2997 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
2998 // Without this, EmitLiveInCopies may eliminate the livein if its only
2999 // use is a bitcast (which isn't turned into an instruction).
3000 Register ResultReg
= createResultReg(RC
);
3001 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
,
3002 TII
.get(TargetOpcode::COPY
), ResultReg
)
3003 .addReg(DstReg
, getKillRegState(true));
3004 updateValueMap(&Arg
, ResultReg
);
3009 bool AArch64FastISel::processCallArgs(CallLoweringInfo
&CLI
,
3010 SmallVectorImpl
<MVT
> &OutVTs
,
3011 unsigned &NumBytes
) {
3012 CallingConv::ID CC
= CLI
.CallConv
;
3013 SmallVector
<CCValAssign
, 16> ArgLocs
;
3014 CCState
CCInfo(CC
, false, *FuncInfo
.MF
, ArgLocs
, *Context
);
3015 CCInfo
.AnalyzeCallOperands(OutVTs
, CLI
.OutFlags
, CCAssignFnForCall(CC
));
3017 // Get a count of how many bytes are to be pushed on the stack.
3018 NumBytes
= CCInfo
.getStackSize();
3020 // Issue CALLSEQ_START
3021 unsigned AdjStackDown
= TII
.getCallFrameSetupOpcode();
3022 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(AdjStackDown
))
3023 .addImm(NumBytes
).addImm(0);
3025 // Process the args.
3026 for (CCValAssign
&VA
: ArgLocs
) {
3027 const Value
*ArgVal
= CLI
.OutVals
[VA
.getValNo()];
3028 MVT ArgVT
= OutVTs
[VA
.getValNo()];
3030 Register ArgReg
= getRegForValue(ArgVal
);
3034 // Handle arg promotion: SExt, ZExt, AExt.
3035 switch (VA
.getLocInfo()) {
3036 case CCValAssign::Full
:
3038 case CCValAssign::SExt
: {
3039 MVT DestVT
= VA
.getLocVT();
3041 ArgReg
= emitIntExt(SrcVT
, ArgReg
, DestVT
, /*isZExt=*/false);
3046 case CCValAssign::AExt
:
3047 // Intentional fall-through.
3048 case CCValAssign::ZExt
: {
3049 MVT DestVT
= VA
.getLocVT();
3051 ArgReg
= emitIntExt(SrcVT
, ArgReg
, DestVT
, /*isZExt=*/true);
3057 llvm_unreachable("Unknown arg promotion!");
3060 // Now copy/store arg to correct locations.
3061 if (VA
.isRegLoc() && !VA
.needsCustom()) {
3062 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
,
3063 TII
.get(TargetOpcode::COPY
), VA
.getLocReg()).addReg(ArgReg
);
3064 CLI
.OutRegs
.push_back(VA
.getLocReg());
3065 } else if (VA
.needsCustom()) {
3066 // FIXME: Handle custom args.
3069 assert(VA
.isMemLoc() && "Assuming store on stack.");
3071 // Don't emit stores for undef values.
3072 if (isa
<UndefValue
>(ArgVal
))
3075 // Need to store on the stack.
3076 unsigned ArgSize
= (ArgVT
.getSizeInBits() + 7) / 8;
3078 unsigned BEAlign
= 0;
3079 if (ArgSize
< 8 && !Subtarget
->isLittleEndian())
3080 BEAlign
= 8 - ArgSize
;
3083 Addr
.setKind(Address::RegBase
);
3084 Addr
.setReg(AArch64::SP
);
3085 Addr
.setOffset(VA
.getLocMemOffset() + BEAlign
);
3087 Align Alignment
= DL
.getABITypeAlign(ArgVal
->getType());
3088 MachineMemOperand
*MMO
= FuncInfo
.MF
->getMachineMemOperand(
3089 MachinePointerInfo::getStack(*FuncInfo
.MF
, Addr
.getOffset()),
3090 MachineMemOperand::MOStore
, ArgVT
.getStoreSize(), Alignment
);
3092 if (!emitStore(ArgVT
, ArgReg
, Addr
, MMO
))
3099 bool AArch64FastISel::finishCall(CallLoweringInfo
&CLI
, unsigned NumBytes
) {
3100 CallingConv::ID CC
= CLI
.CallConv
;
3102 // Issue CALLSEQ_END
3103 unsigned AdjStackUp
= TII
.getCallFrameDestroyOpcode();
3104 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(AdjStackUp
))
3105 .addImm(NumBytes
).addImm(0);
3107 // Now the return values.
3108 SmallVector
<CCValAssign
, 16> RVLocs
;
3109 CCState
CCInfo(CC
, false, *FuncInfo
.MF
, RVLocs
, *Context
);
3110 CCInfo
.AnalyzeCallResult(CLI
.Ins
, CCAssignFnForCall(CC
));
3112 Register ResultReg
= FuncInfo
.CreateRegs(CLI
.RetTy
);
3113 for (unsigned i
= 0; i
!= RVLocs
.size(); ++i
) {
3114 CCValAssign
&VA
= RVLocs
[i
];
3115 MVT CopyVT
= VA
.getValVT();
3116 unsigned CopyReg
= ResultReg
+ i
;
3118 // TODO: Handle big-endian results
3119 if (CopyVT
.isVector() && !Subtarget
->isLittleEndian())
3122 // Copy result out of their specified physreg.
3123 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(TargetOpcode::COPY
),
3125 .addReg(VA
.getLocReg());
3126 CLI
.InRegs
.push_back(VA
.getLocReg());
3129 CLI
.ResultReg
= ResultReg
;
3130 CLI
.NumResultRegs
= RVLocs
.size();
3135 bool AArch64FastISel::fastLowerCall(CallLoweringInfo
&CLI
) {
3136 CallingConv::ID CC
= CLI
.CallConv
;
3137 bool IsTailCall
= CLI
.IsTailCall
;
3138 bool IsVarArg
= CLI
.IsVarArg
;
3139 const Value
*Callee
= CLI
.Callee
;
3140 MCSymbol
*Symbol
= CLI
.Symbol
;
3142 if (!Callee
&& !Symbol
)
3145 // Allow SelectionDAG isel to handle calls to functions like setjmp that need
3146 // a bti instruction following the call.
3147 if (CLI
.CB
&& CLI
.CB
->hasFnAttr(Attribute::ReturnsTwice
) &&
3148 !Subtarget
->noBTIAtReturnTwice() &&
3149 MF
->getInfo
<AArch64FunctionInfo
>()->branchTargetEnforcement())
3152 // Allow SelectionDAG isel to handle indirect calls with KCFI checks.
3153 if (CLI
.CB
&& CLI
.CB
->isIndirectCall() &&
3154 CLI
.CB
->getOperandBundle(LLVMContext::OB_kcfi
))
3157 // Allow SelectionDAG isel to handle tail calls.
3161 // FIXME: we could and should support this, but for now correctness at -O0 is
3163 if (Subtarget
->isTargetILP32())
3166 CodeModel::Model CM
= TM
.getCodeModel();
3167 // Only support the small-addressing and large code models.
3168 if (CM
!= CodeModel::Large
&& !Subtarget
->useSmallAddressing())
3171 // FIXME: Add large code model support for ELF.
3172 if (CM
== CodeModel::Large
&& !Subtarget
->isTargetMachO())
3175 // ELF -fno-plt compiled intrinsic calls do not have the nonlazybind
3176 // attribute. Check "RtLibUseGOT" instead.
3177 if (MF
->getFunction().getParent()->getRtLibUseGOT())
3180 // Let SDISel handle vararg functions.
3184 if (Subtarget
->isWindowsArm64EC())
3187 for (auto Flag
: CLI
.OutFlags
)
3188 if (Flag
.isInReg() || Flag
.isSRet() || Flag
.isNest() || Flag
.isByVal() ||
3189 Flag
.isSwiftSelf() || Flag
.isSwiftAsync() || Flag
.isSwiftError())
3192 // Set up the argument vectors.
3193 SmallVector
<MVT
, 16> OutVTs
;
3194 OutVTs
.reserve(CLI
.OutVals
.size());
3196 for (auto *Val
: CLI
.OutVals
) {
3198 if (!isTypeLegal(Val
->getType(), VT
) &&
3199 !(VT
== MVT::i1
|| VT
== MVT::i8
|| VT
== MVT::i16
))
3202 // We don't handle vector parameters yet.
3203 if (VT
.isVector() || VT
.getSizeInBits() > 64)
3206 OutVTs
.push_back(VT
);
3210 if (Callee
&& !computeCallAddress(Callee
, Addr
))
3213 // The weak function target may be zero; in that case we must use indirect
3214 // addressing via a stub on windows as it may be out of range for a
3215 // PC-relative jump.
3216 if (Subtarget
->isTargetWindows() && Addr
.getGlobalValue() &&
3217 Addr
.getGlobalValue()->hasExternalWeakLinkage())
3220 // Handle the arguments now that we've gotten them.
3222 if (!processCallArgs(CLI
, OutVTs
, NumBytes
))
3225 const AArch64RegisterInfo
*RegInfo
= Subtarget
->getRegisterInfo();
3226 if (RegInfo
->isAnyArgRegReserved(*MF
))
3227 RegInfo
->emitReservedArgRegCallError(*MF
);
3230 MachineInstrBuilder MIB
;
3231 if (Subtarget
->useSmallAddressing()) {
3232 const MCInstrDesc
&II
=
3233 TII
.get(Addr
.getReg() ? getBLRCallOpcode(*MF
) : (unsigned)AArch64::BL
);
3234 MIB
= BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, II
);
3236 MIB
.addSym(Symbol
, 0);
3237 else if (Addr
.getGlobalValue())
3238 MIB
.addGlobalAddress(Addr
.getGlobalValue(), 0, 0);
3239 else if (Addr
.getReg()) {
3240 Register Reg
= constrainOperandRegClass(II
, Addr
.getReg(), 0);
3245 unsigned CallReg
= 0;
3247 Register ADRPReg
= createResultReg(&AArch64::GPR64commonRegClass
);
3248 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(AArch64::ADRP
),
3250 .addSym(Symbol
, AArch64II::MO_GOT
| AArch64II::MO_PAGE
);
3252 CallReg
= createResultReg(&AArch64::GPR64RegClass
);
3253 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
,
3254 TII
.get(AArch64::LDRXui
), CallReg
)
3257 AArch64II::MO_GOT
| AArch64II::MO_PAGEOFF
| AArch64II::MO_NC
);
3258 } else if (Addr
.getGlobalValue())
3259 CallReg
= materializeGV(Addr
.getGlobalValue());
3260 else if (Addr
.getReg())
3261 CallReg
= Addr
.getReg();
3266 const MCInstrDesc
&II
= TII
.get(getBLRCallOpcode(*MF
));
3267 CallReg
= constrainOperandRegClass(II
, CallReg
, 0);
3268 MIB
= BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, II
).addReg(CallReg
);
3271 // Add implicit physical register uses to the call.
3272 for (auto Reg
: CLI
.OutRegs
)
3273 MIB
.addReg(Reg
, RegState::Implicit
);
3275 // Add a register mask with the call-preserved registers.
3276 // Proper defs for return values will be added by setPhysRegsDeadExcept().
3277 MIB
.addRegMask(TRI
.getCallPreservedMask(*FuncInfo
.MF
, CC
));
3281 // Finish off the call including any return values.
3282 return finishCall(CLI
, NumBytes
);
3285 bool AArch64FastISel::isMemCpySmall(uint64_t Len
, MaybeAlign Alignment
) {
3287 return Len
/ Alignment
->value() <= 4;
3292 bool AArch64FastISel::tryEmitSmallMemCpy(Address Dest
, Address Src
,
3293 uint64_t Len
, MaybeAlign Alignment
) {
3294 // Make sure we don't bloat code by inlining very large memcpy's.
3295 if (!isMemCpySmall(Len
, Alignment
))
3298 int64_t UnscaledOffset
= 0;
3299 Address OrigDest
= Dest
;
3300 Address OrigSrc
= Src
;
3304 if (!Alignment
|| *Alignment
>= 8) {
3315 assert(Alignment
&& "Alignment is set in this branch");
3316 // Bound based on alignment.
3317 if (Len
>= 4 && *Alignment
== 4)
3319 else if (Len
>= 2 && *Alignment
== 2)
3326 unsigned ResultReg
= emitLoad(VT
, VT
, Src
);
3330 if (!emitStore(VT
, ResultReg
, Dest
))
3333 int64_t Size
= VT
.getSizeInBits() / 8;
3335 UnscaledOffset
+= Size
;
3337 // We need to recompute the unscaled offset for each iteration.
3338 Dest
.setOffset(OrigDest
.getOffset() + UnscaledOffset
);
3339 Src
.setOffset(OrigSrc
.getOffset() + UnscaledOffset
);
3345 /// Check if it is possible to fold the condition from the XALU intrinsic
3346 /// into the user. The condition code will only be updated on success.
3347 bool AArch64FastISel::foldXALUIntrinsic(AArch64CC::CondCode
&CC
,
3348 const Instruction
*I
,
3349 const Value
*Cond
) {
3350 if (!isa
<ExtractValueInst
>(Cond
))
3353 const auto *EV
= cast
<ExtractValueInst
>(Cond
);
3354 if (!isa
<IntrinsicInst
>(EV
->getAggregateOperand()))
3357 const auto *II
= cast
<IntrinsicInst
>(EV
->getAggregateOperand());
3359 const Function
*Callee
= II
->getCalledFunction();
3361 cast
<StructType
>(Callee
->getReturnType())->getTypeAtIndex(0U);
3362 if (!isTypeLegal(RetTy
, RetVT
))
3365 if (RetVT
!= MVT::i32
&& RetVT
!= MVT::i64
)
3368 const Value
*LHS
= II
->getArgOperand(0);
3369 const Value
*RHS
= II
->getArgOperand(1);
3371 // Canonicalize immediate to the RHS.
3372 if (isa
<ConstantInt
>(LHS
) && !isa
<ConstantInt
>(RHS
) && II
->isCommutative())
3373 std::swap(LHS
, RHS
);
3375 // Simplify multiplies.
3376 Intrinsic::ID IID
= II
->getIntrinsicID();
3380 case Intrinsic::smul_with_overflow
:
3381 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
3382 if (C
->getValue() == 2)
3383 IID
= Intrinsic::sadd_with_overflow
;
3385 case Intrinsic::umul_with_overflow
:
3386 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
3387 if (C
->getValue() == 2)
3388 IID
= Intrinsic::uadd_with_overflow
;
3392 AArch64CC::CondCode TmpCC
;
3396 case Intrinsic::sadd_with_overflow
:
3397 case Intrinsic::ssub_with_overflow
:
3398 TmpCC
= AArch64CC::VS
;
3400 case Intrinsic::uadd_with_overflow
:
3401 TmpCC
= AArch64CC::HS
;
3403 case Intrinsic::usub_with_overflow
:
3404 TmpCC
= AArch64CC::LO
;
3406 case Intrinsic::smul_with_overflow
:
3407 case Intrinsic::umul_with_overflow
:
3408 TmpCC
= AArch64CC::NE
;
3412 // Check if both instructions are in the same basic block.
3413 if (!isValueAvailable(II
))
3416 // Make sure nothing is in the way
3417 BasicBlock::const_iterator
Start(I
);
3418 BasicBlock::const_iterator
End(II
);
3419 for (auto Itr
= std::prev(Start
); Itr
!= End
; --Itr
) {
3420 // We only expect extractvalue instructions between the intrinsic and the
3421 // instruction to be selected.
3422 if (!isa
<ExtractValueInst
>(Itr
))
3425 // Check that the extractvalue operand comes from the intrinsic.
3426 const auto *EVI
= cast
<ExtractValueInst
>(Itr
);
3427 if (EVI
->getAggregateOperand() != II
)
3435 bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst
*II
) {
3436 // FIXME: Handle more intrinsics.
3437 switch (II
->getIntrinsicID()) {
3438 default: return false;
3439 case Intrinsic::frameaddress
: {
3440 MachineFrameInfo
&MFI
= FuncInfo
.MF
->getFrameInfo();
3441 MFI
.setFrameAddressIsTaken(true);
3443 const AArch64RegisterInfo
*RegInfo
= Subtarget
->getRegisterInfo();
3444 Register FramePtr
= RegInfo
->getFrameRegister(*(FuncInfo
.MF
));
3445 Register SrcReg
= MRI
.createVirtualRegister(&AArch64::GPR64RegClass
);
3446 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
,
3447 TII
.get(TargetOpcode::COPY
), SrcReg
).addReg(FramePtr
);
3448 // Recursively load frame address
3454 unsigned Depth
= cast
<ConstantInt
>(II
->getOperand(0))->getZExtValue();
3456 DestReg
= fastEmitInst_ri(AArch64::LDRXui
, &AArch64::GPR64RegClass
,
3458 assert(DestReg
&& "Unexpected LDR instruction emission failure.");
3462 updateValueMap(II
, SrcReg
);
3465 case Intrinsic::sponentry
: {
3466 MachineFrameInfo
&MFI
= FuncInfo
.MF
->getFrameInfo();
3468 // SP = FP + Fixed Object + 16
3469 int FI
= MFI
.CreateFixedObject(4, 0, false);
3470 Register ResultReg
= createResultReg(&AArch64::GPR64spRegClass
);
3471 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
,
3472 TII
.get(AArch64::ADDXri
), ResultReg
)
3477 updateValueMap(II
, ResultReg
);
3480 case Intrinsic::memcpy
:
3481 case Intrinsic::memmove
: {
3482 const auto *MTI
= cast
<MemTransferInst
>(II
);
3483 // Don't handle volatile.
3484 if (MTI
->isVolatile())
3487 // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
3488 // we would emit dead code because we don't currently handle memmoves.
3489 bool IsMemCpy
= (II
->getIntrinsicID() == Intrinsic::memcpy
);
3490 if (isa
<ConstantInt
>(MTI
->getLength()) && IsMemCpy
) {
3491 // Small memcpy's are common enough that we want to do them without a call
3493 uint64_t Len
= cast
<ConstantInt
>(MTI
->getLength())->getZExtValue();
3494 MaybeAlign Alignment
;
3495 if (MTI
->getDestAlign() || MTI
->getSourceAlign())
3496 Alignment
= std::min(MTI
->getDestAlign().valueOrOne(),
3497 MTI
->getSourceAlign().valueOrOne());
3498 if (isMemCpySmall(Len
, Alignment
)) {
3500 if (!computeAddress(MTI
->getRawDest(), Dest
) ||
3501 !computeAddress(MTI
->getRawSource(), Src
))
3503 if (tryEmitSmallMemCpy(Dest
, Src
, Len
, Alignment
))
3508 if (!MTI
->getLength()->getType()->isIntegerTy(64))
3511 if (MTI
->getSourceAddressSpace() > 255 || MTI
->getDestAddressSpace() > 255)
3512 // Fast instruction selection doesn't support the special
3516 const char *IntrMemName
= isa
<MemCpyInst
>(II
) ? "memcpy" : "memmove";
3517 return lowerCallTo(II
, IntrMemName
, II
->arg_size() - 1);
3519 case Intrinsic::memset
: {
3520 const MemSetInst
*MSI
= cast
<MemSetInst
>(II
);
3521 // Don't handle volatile.
3522 if (MSI
->isVolatile())
3525 if (!MSI
->getLength()->getType()->isIntegerTy(64))
3528 if (MSI
->getDestAddressSpace() > 255)
3529 // Fast instruction selection doesn't support the special
3533 return lowerCallTo(II
, "memset", II
->arg_size() - 1);
3535 case Intrinsic::sin
:
3536 case Intrinsic::cos
:
3537 case Intrinsic::pow
: {
3539 if (!isTypeLegal(II
->getType(), RetVT
))
3542 if (RetVT
!= MVT::f32
&& RetVT
!= MVT::f64
)
3545 static const RTLIB::Libcall LibCallTable
[3][2] = {
3546 { RTLIB::SIN_F32
, RTLIB::SIN_F64
},
3547 { RTLIB::COS_F32
, RTLIB::COS_F64
},
3548 { RTLIB::POW_F32
, RTLIB::POW_F64
}
3551 bool Is64Bit
= RetVT
== MVT::f64
;
3552 switch (II
->getIntrinsicID()) {
3554 llvm_unreachable("Unexpected intrinsic.");
3555 case Intrinsic::sin
:
3556 LC
= LibCallTable
[0][Is64Bit
];
3558 case Intrinsic::cos
:
3559 LC
= LibCallTable
[1][Is64Bit
];
3561 case Intrinsic::pow
:
3562 LC
= LibCallTable
[2][Is64Bit
];
3567 Args
.reserve(II
->arg_size());
3569 // Populate the argument list.
3570 for (auto &Arg
: II
->args()) {
3573 Entry
.Ty
= Arg
->getType();
3574 Args
.push_back(Entry
);
3577 CallLoweringInfo CLI
;
3578 MCContext
&Ctx
= MF
->getContext();
3579 CLI
.setCallee(DL
, Ctx
, TLI
.getLibcallCallingConv(LC
), II
->getType(),
3580 TLI
.getLibcallName(LC
), std::move(Args
));
3581 if (!lowerCallTo(CLI
))
3583 updateValueMap(II
, CLI
.ResultReg
);
3586 case Intrinsic::fabs
: {
3588 if (!isTypeLegal(II
->getType(), VT
))
3592 switch (VT
.SimpleTy
) {
3596 Opc
= AArch64::FABSSr
;
3599 Opc
= AArch64::FABSDr
;
3602 Register SrcReg
= getRegForValue(II
->getOperand(0));
3605 Register ResultReg
= createResultReg(TLI
.getRegClassFor(VT
));
3606 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(Opc
), ResultReg
)
3608 updateValueMap(II
, ResultReg
);
3611 case Intrinsic::trap
:
3612 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(AArch64::BRK
))
3615 case Intrinsic::debugtrap
:
3616 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
, TII
.get(AArch64::BRK
))
3620 case Intrinsic::sqrt
: {
3621 Type
*RetTy
= II
->getCalledFunction()->getReturnType();
3624 if (!isTypeLegal(RetTy
, VT
))
3627 Register Op0Reg
= getRegForValue(II
->getOperand(0));
3631 unsigned ResultReg
= fastEmit_r(VT
, VT
, ISD::FSQRT
, Op0Reg
);
3635 updateValueMap(II
, ResultReg
);
3638 case Intrinsic::sadd_with_overflow
:
3639 case Intrinsic::uadd_with_overflow
:
3640 case Intrinsic::ssub_with_overflow
:
3641 case Intrinsic::usub_with_overflow
:
3642 case Intrinsic::smul_with_overflow
:
3643 case Intrinsic::umul_with_overflow
: {
3644 // This implements the basic lowering of the xalu with overflow intrinsics.
3645 const Function
*Callee
= II
->getCalledFunction();
3646 auto *Ty
= cast
<StructType
>(Callee
->getReturnType());
3647 Type
*RetTy
= Ty
->getTypeAtIndex(0U);
3650 if (!isTypeLegal(RetTy
, VT
))
3653 if (VT
!= MVT::i32
&& VT
!= MVT::i64
)
3656 const Value
*LHS
= II
->getArgOperand(0);
3657 const Value
*RHS
= II
->getArgOperand(1);
3658 // Canonicalize immediate to the RHS.
3659 if (isa
<ConstantInt
>(LHS
) && !isa
<ConstantInt
>(RHS
) && II
->isCommutative())
3660 std::swap(LHS
, RHS
);
3662 // Simplify multiplies.
3663 Intrinsic::ID IID
= II
->getIntrinsicID();
3667 case Intrinsic::smul_with_overflow
:
3668 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
3669 if (C
->getValue() == 2) {
3670 IID
= Intrinsic::sadd_with_overflow
;
3674 case Intrinsic::umul_with_overflow
:
3675 if (const auto *C
= dyn_cast
<ConstantInt
>(RHS
))
3676 if (C
->getValue() == 2) {
3677 IID
= Intrinsic::uadd_with_overflow
;
3683 unsigned ResultReg1
= 0, ResultReg2
= 0, MulReg
= 0;
3684 AArch64CC::CondCode CC
= AArch64CC::Invalid
;
3686 default: llvm_unreachable("Unexpected intrinsic!");
3687 case Intrinsic::sadd_with_overflow
:
3688 ResultReg1
= emitAdd(VT
, LHS
, RHS
, /*SetFlags=*/true);
3691 case Intrinsic::uadd_with_overflow
:
3692 ResultReg1
= emitAdd(VT
, LHS
, RHS
, /*SetFlags=*/true);
3695 case Intrinsic::ssub_with_overflow
:
3696 ResultReg1
= emitSub(VT
, LHS
, RHS
, /*SetFlags=*/true);
3699 case Intrinsic::usub_with_overflow
:
3700 ResultReg1
= emitSub(VT
, LHS
, RHS
, /*SetFlags=*/true);
3703 case Intrinsic::smul_with_overflow
: {
3705 Register LHSReg
= getRegForValue(LHS
);
3709 Register RHSReg
= getRegForValue(RHS
);
3713 if (VT
== MVT::i32
) {
3714 MulReg
= emitSMULL_rr(MVT::i64
, LHSReg
, RHSReg
);
3715 Register MulSubReg
=
3716 fastEmitInst_extractsubreg(VT
, MulReg
, AArch64::sub_32
);
3717 // cmp xreg, wreg, sxtw
3718 emitAddSub_rx(/*UseAdd=*/false, MVT::i64
, MulReg
, MulSubReg
,
3719 AArch64_AM::SXTW
, /*ShiftImm=*/0, /*SetFlags=*/true,
3720 /*WantResult=*/false);
3723 assert(VT
== MVT::i64
&& "Unexpected value type.");
3724 // LHSReg and RHSReg cannot be killed by this Mul, since they are
3725 // reused in the next instruction.
3726 MulReg
= emitMul_rr(VT
, LHSReg
, RHSReg
);
3727 unsigned SMULHReg
= fastEmit_rr(VT
, VT
, ISD::MULHS
, LHSReg
, RHSReg
);
3728 emitSubs_rs(VT
, SMULHReg
, MulReg
, AArch64_AM::ASR
, 63,
3729 /*WantResult=*/false);
3733 case Intrinsic::umul_with_overflow
: {
3735 Register LHSReg
= getRegForValue(LHS
);
3739 Register RHSReg
= getRegForValue(RHS
);
3743 if (VT
== MVT::i32
) {
3744 MulReg
= emitUMULL_rr(MVT::i64
, LHSReg
, RHSReg
);
3745 // tst xreg, #0xffffffff00000000
3746 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
,
3747 TII
.get(AArch64::ANDSXri
), AArch64::XZR
)
3749 .addImm(AArch64_AM::encodeLogicalImmediate(0xFFFFFFFF00000000, 64));
3750 MulReg
= fastEmitInst_extractsubreg(VT
, MulReg
, AArch64::sub_32
);
3752 assert(VT
== MVT::i64
&& "Unexpected value type.");
3753 // LHSReg and RHSReg cannot be killed by this Mul, since they are
3754 // reused in the next instruction.
3755 MulReg
= emitMul_rr(VT
, LHSReg
, RHSReg
);
3756 unsigned UMULHReg
= fastEmit_rr(VT
, VT
, ISD::MULHU
, LHSReg
, RHSReg
);
3757 emitSubs_rr(VT
, AArch64::XZR
, UMULHReg
, /*WantResult=*/false);
3764 ResultReg1
= createResultReg(TLI
.getRegClassFor(VT
));
3765 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
,
3766 TII
.get(TargetOpcode::COPY
), ResultReg1
).addReg(MulReg
);
3772 ResultReg2
= fastEmitInst_rri(AArch64::CSINCWr
, &AArch64::GPR32RegClass
,
3773 AArch64::WZR
, AArch64::WZR
,
3774 getInvertedCondCode(CC
));
3776 assert((ResultReg1
+ 1) == ResultReg2
&&
3777 "Nonconsecutive result registers.");
3778 updateValueMap(II
, ResultReg1
, 2);
3781 case Intrinsic::aarch64_crc32b
:
3782 case Intrinsic::aarch64_crc32h
:
3783 case Intrinsic::aarch64_crc32w
:
3784 case Intrinsic::aarch64_crc32x
:
3785 case Intrinsic::aarch64_crc32cb
:
3786 case Intrinsic::aarch64_crc32ch
:
3787 case Intrinsic::aarch64_crc32cw
:
3788 case Intrinsic::aarch64_crc32cx
: {
3789 if (!Subtarget
->hasCRC())
3793 switch (II
->getIntrinsicID()) {
3795 llvm_unreachable("Unexpected intrinsic!");
3796 case Intrinsic::aarch64_crc32b
:
3797 Opc
= AArch64::CRC32Brr
;
3799 case Intrinsic::aarch64_crc32h
:
3800 Opc
= AArch64::CRC32Hrr
;
3802 case Intrinsic::aarch64_crc32w
:
3803 Opc
= AArch64::CRC32Wrr
;
3805 case Intrinsic::aarch64_crc32x
:
3806 Opc
= AArch64::CRC32Xrr
;
3808 case Intrinsic::aarch64_crc32cb
:
3809 Opc
= AArch64::CRC32CBrr
;
3811 case Intrinsic::aarch64_crc32ch
:
3812 Opc
= AArch64::CRC32CHrr
;
3814 case Intrinsic::aarch64_crc32cw
:
3815 Opc
= AArch64::CRC32CWrr
;
3817 case Intrinsic::aarch64_crc32cx
:
3818 Opc
= AArch64::CRC32CXrr
;
3822 Register LHSReg
= getRegForValue(II
->getArgOperand(0));
3823 Register RHSReg
= getRegForValue(II
->getArgOperand(1));
3824 if (!LHSReg
|| !RHSReg
)
3827 Register ResultReg
=
3828 fastEmitInst_rr(Opc
, &AArch64::GPR32RegClass
, LHSReg
, RHSReg
);
3829 updateValueMap(II
, ResultReg
);
3836 bool AArch64FastISel::selectRet(const Instruction
*I
) {
3837 const ReturnInst
*Ret
= cast
<ReturnInst
>(I
);
3838 const Function
&F
= *I
->getParent()->getParent();
3840 if (!FuncInfo
.CanLowerReturn
)
3846 if (TLI
.supportSwiftError() &&
3847 F
.getAttributes().hasAttrSomewhere(Attribute::SwiftError
))
3850 if (TLI
.supportSplitCSR(FuncInfo
.MF
))
3853 // Build a list of return value registers.
3854 SmallVector
<unsigned, 4> RetRegs
;
3856 if (Ret
->getNumOperands() > 0) {
3857 CallingConv::ID CC
= F
.getCallingConv();
3858 SmallVector
<ISD::OutputArg
, 4> Outs
;
3859 GetReturnInfo(CC
, F
.getReturnType(), F
.getAttributes(), Outs
, TLI
, DL
);
3861 // Analyze operands of the call, assigning locations to each operand.
3862 SmallVector
<CCValAssign
, 16> ValLocs
;
3863 CCState
CCInfo(CC
, F
.isVarArg(), *FuncInfo
.MF
, ValLocs
, I
->getContext());
3864 CCInfo
.AnalyzeReturn(Outs
, RetCC_AArch64_AAPCS
);
3866 // Only handle a single return value for now.
3867 if (ValLocs
.size() != 1)
3870 CCValAssign
&VA
= ValLocs
[0];
3871 const Value
*RV
= Ret
->getOperand(0);
3873 // Don't bother handling odd stuff for now.
3874 if ((VA
.getLocInfo() != CCValAssign::Full
) &&
3875 (VA
.getLocInfo() != CCValAssign::BCvt
))
3878 // Only handle register returns for now.
3882 Register Reg
= getRegForValue(RV
);
3886 unsigned SrcReg
= Reg
+ VA
.getValNo();
3887 Register DestReg
= VA
.getLocReg();
3888 // Avoid a cross-class copy. This is very unlikely.
3889 if (!MRI
.getRegClass(SrcReg
)->contains(DestReg
))
3892 EVT RVEVT
= TLI
.getValueType(DL
, RV
->getType());
3893 if (!RVEVT
.isSimple())
3896 // Vectors (of > 1 lane) in big endian need tricky handling.
3897 if (RVEVT
.isVector() && RVEVT
.getVectorElementCount().isVector() &&
3898 !Subtarget
->isLittleEndian())
3901 MVT RVVT
= RVEVT
.getSimpleVT();
3902 if (RVVT
== MVT::f128
)
3905 MVT DestVT
= VA
.getValVT();
3906 // Special handling for extended integers.
3907 if (RVVT
!= DestVT
) {
3908 if (RVVT
!= MVT::i1
&& RVVT
!= MVT::i8
&& RVVT
!= MVT::i16
)
3911 if (!Outs
[0].Flags
.isZExt() && !Outs
[0].Flags
.isSExt())
3914 bool IsZExt
= Outs
[0].Flags
.isZExt();
3915 SrcReg
= emitIntExt(RVVT
, SrcReg
, DestVT
, IsZExt
);
3920 // "Callee" (i.e. value producer) zero extends pointers at function
3922 if (Subtarget
->isTargetILP32() && RV
->getType()->isPointerTy())
3923 SrcReg
= emitAnd_ri(MVT::i64
, SrcReg
, 0xffffffff);
3926 BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
,
3927 TII
.get(TargetOpcode::COPY
), DestReg
).addReg(SrcReg
);
3929 // Add register to return instruction.
3930 RetRegs
.push_back(VA
.getLocReg());
3933 MachineInstrBuilder MIB
= BuildMI(*FuncInfo
.MBB
, FuncInfo
.InsertPt
, MIMD
,
3934 TII
.get(AArch64::RET_ReallyLR
));
3935 for (unsigned RetReg
: RetRegs
)
3936 MIB
.addReg(RetReg
, RegState::Implicit
);
bool AArch64FastISel::selectTrunc(const Instruction *I) {
  Type *DestTy = I->getType();
  Value *Op = I->getOperand(0);
  Type *SrcTy = Op->getType();

  EVT SrcEVT = TLI.getValueType(DL, SrcTy, true);
  EVT DestEVT = TLI.getValueType(DL, DestTy, true);
  if (!SrcEVT.isSimple())
    return false;
  if (!DestEVT.isSimple())
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();

  if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 &&
      SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8 &&
      DestVT != MVT::i1)
    return false;

  Register SrcReg = getRegForValue(Op);
  if (!SrcReg)
    return false;

  // If we're truncating from i64 to a smaller non-legal type then generate an
  // AND. Otherwise, we know the high bits are undefined and a truncate only
  // generates a COPY. We cannot mark the source register also as result
  // register, because this can incorrectly transfer the kill flag onto the
  // source register.
  unsigned ResultReg;
  if (SrcVT == MVT::i64) {
    uint64_t Mask = 0;
    switch (DestVT.SimpleTy) {
    default:
      // Trunc i64 to i32 is handled by the target-independent fast-isel.
      return false;
    case MVT::i1:
      Mask = 0x1;
      break;
    case MVT::i8:
      Mask = 0xff;
      break;
    case MVT::i16:
      Mask = 0xffff;
      break;
    }
    // Issue an extract_subreg to get the lower 32-bits.
    Register Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg,
                                                AArch64::sub_32);
    // Create the AND instruction which performs the actual truncation.
    ResultReg = emitAnd_ri(MVT::i32, Reg32, Mask);
    assert(ResultReg && "Unexpected AND instruction emission failure.");
  } else {
    ResultReg = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(SrcReg);
  }

  updateValueMap(I, ResultReg);
  return true;
}

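// Example of what selectTrunc (above) produces, as a sketch: for
// "trunc i64 %x to i8" it extracts the sub_32 half of %x and then emits
// "and w0, w1, #0xff"; truncations from an already-legal 32-bit source just
// become a plain COPY because the high bits are undefined anyway.
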
unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) {
  assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 ||
          DestVT == MVT::i64) &&
         "Unexpected value type.");
  // Handle i8 and i16 as i32.
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
    DestVT = MVT::i32;

  if (IsZExt) {
    unsigned ResultReg = emitAnd_ri(MVT::i32, SrcReg, 1);
    assert(ResultReg && "Unexpected AND instruction emission failure.");
    if (DestVT == MVT::i64) {
      // We're ZExt i1 to i64. The ANDWri Wd, Ws, #1 implicitly clears the
      // upper 32 bits. Emit a SUBREG_TO_REG to extend from Wd to Xd.
      Register Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(AArch64::SUBREG_TO_REG), Reg64)
          .addImm(0)
          .addReg(ResultReg)
          .addImm(AArch64::sub_32);
      ResultReg = Reg64;
    }
    return ResultReg;
  } else {
    if (DestVT == MVT::i64) {
      // FIXME: We're SExt i1 to i64.
      return 0;
    }
    return fastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
                            0, 0);
  }
}

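// Sketch of emiti1Ext (above) for a zero-extension of i1 to i64: the value is
// first masked with "and w8, w8, #0x1" (ANDWri already clears the upper 32
// bits), then widened at no cost with SUBREG_TO_REG into an X register.
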
unsigned AArch64FastISel::emitMul_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
  unsigned Opc, ZReg;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    RetVT = MVT::i32;
    Opc = AArch64::MADDWrrr; ZReg = AArch64::WZR; break;
  case MVT::i64:
    Opc = AArch64::MADDXrrr; ZReg = AArch64::XZR; break;
  }

  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  return fastEmitInst_rrr(Opc, RC, Op0, Op1, ZReg);
}

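// Note on emitMul_rr (above): the product is formed as a multiply-add against
// the zero register, e.g. "madd w0, w1, w2, wzr", which is the standard "mul"
// alias on AArch64.
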
unsigned AArch64FastISel::emitSMULL_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
  if (RetVT != MVT::i64)
    return 0;

  return fastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
                          Op0, Op1, AArch64::XZR);
}

unsigned AArch64FastISel::emitUMULL_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
  if (RetVT != MVT::i64)
    return 0;

  return fastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
                          Op0, Op1, AArch64::XZR);
}

unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg,
                                     unsigned Op1Reg) {
  unsigned Opc = 0;
  bool NeedTrunc = false;
  uint64_t Mask = 0;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:  Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::LSLVWr;                                  break;
  case MVT::i64: Opc = AArch64::LSLVXr;                                  break;
  }

  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (NeedTrunc)
    Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);

  Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
  if (NeedTrunc)
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
  return ResultReg;
}

unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                     uint64_t Shift, bool IsZExt) {
  assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) && "Unexpected return value type.");

  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
  unsigned DstBits = RetVT.getSizeInBits();
  unsigned SrcBits = SrcVT.getSizeInBits();
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

  // Just emit a copy for "zero" shifts.
  if (Shift == 0) {
    if (RetVT == SrcVT) {
      Register ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(Op0);
      return ResultReg;
    } else
      return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  }

  // Don't deal with undefined shifts.
  if (Shift >= DstBits)
    return 0;

  // For immediate shifts we can fold the zero-/sign-extension into the shift.
  // {S|U}BFM Wd, Wn, #r, #s
  // Wd<32+s-r,32-r> = Wn<s:0> when r > s
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = shl i16 %1, 4
  // Wd<32+7-28,32-28> = Wn<7:0> <- clamp s to 7
  // 0b1111_1111_1111_1111__1111_1010_1010_0000 sext
  // 0b0000_0000_0000_0000__0000_0101_0101_0000 sext | zext
  // 0b0000_0000_0000_0000__0000_1010_1010_0000 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = shl i16 %1, 8
  // Wd<32+7-24,32-24> = Wn<7:0>
  // 0b1111_1111_1111_1111__1010_1010_0000_0000 sext
  // 0b0000_0000_0000_0000__0101_0101_0000_0000 sext | zext
  // 0b0000_0000_0000_0000__1010_1010_0000_0000 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = shl i16 %1, 12
  // Wd<32+3-20,32-20> = Wn<3:0>
  // 0b1111_1111_1111_1111__1010_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0101_0000_0000_0000 sext | zext
  // 0b0000_0000_0000_0000__1010_0000_0000_0000 zext

  unsigned ImmR = RegSize - Shift;
  // Limit the width to the length of the source type.
  unsigned ImmS = std::min<unsigned>(SrcBits - 1, DstBits - 1 - Shift);
  static const unsigned OpcTable[2][2] = {
    {AArch64::SBFMWri, AArch64::SBFMXri},
    {AArch64::UBFMWri, AArch64::UBFMXri}
  };
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    Register TmpReg = MRI.createVirtualRegister(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(0)
        .addReg(Op0)
        .addImm(AArch64::sub_32);
    Op0 = TmpReg;
  }
  return fastEmitInst_rii(Opc, RC, Op0, ImmR, ImmS);
}

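// Worked example for emitLSL_ri (above), as an illustrative sketch: for
// "%2 = shl i16 (zext i8 %x to i16), 4", ImmR becomes 32 - 4 = 28 and ImmS is
// clamped to 7, so the emitted UBFMWri is the alias "ubfiz w0, w1, #4, #8";
// the zero-extend and the shift fold into a single bitfield move.
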
unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg,
                                     unsigned Op1Reg) {
  unsigned Opc = 0;
  bool NeedTrunc = false;
  uint64_t Mask = 0;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:  Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::LSRVWr; break;
  case MVT::i64: Opc = AArch64::LSRVXr; break;
  }

  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (NeedTrunc) {
    Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Mask);
    Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
  }
  Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
  if (NeedTrunc)
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
  return ResultReg;
}

unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                     uint64_t Shift, bool IsZExt) {
  assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) && "Unexpected return value type.");

  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
  unsigned DstBits = RetVT.getSizeInBits();
  unsigned SrcBits = SrcVT.getSizeInBits();
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

  // Just emit a copy for "zero" shifts.
  if (Shift == 0) {
    if (RetVT == SrcVT) {
      Register ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(Op0);
      return ResultReg;
    } else
      return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  }

  // Don't deal with undefined shifts.
  if (Shift >= DstBits)
    return 0;

  // For immediate shifts we can fold the zero-/sign-extension into the shift.
  // {S|U}BFM Wd, Wn, #r, #s
  // Wd<s-r:0> = Wn<s:r> when r <= s
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = lshr i16 %1, 4
  // Wd<7-4:0> = Wn<7:4>
  // 0b0000_0000_0000_0000__0000_1111_1111_1010 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0101 sext | zext
  // 0b0000_0000_0000_0000__0000_0000_0000_1010 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = lshr i16 %1, 8
  // Wd<7-7,0> = Wn<7:7>
  // 0b0000_0000_0000_0000__0000_0000_1111_1111 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = lshr i16 %1, 12
  // Wd<7-7,0> = Wn<7:7> <- clamp r to 7
  // 0b0000_0000_0000_0000__0000_0000_0000_1111 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext

  if (Shift >= SrcBits && IsZExt)
    return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);

  // It is not possible to fold a sign-extend into the LShr instruction. In this
  // case emit a sign-extend.
  if (!IsZExt) {
    Op0 = emitIntExt(SrcVT, Op0, RetVT, IsZExt);
    if (!Op0)
      return 0;
    SrcVT = RetVT;
    SrcBits = SrcVT.getSizeInBits();
    IsZExt = true;
  }

  unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
  unsigned ImmS = SrcBits - 1;
  static const unsigned OpcTable[2][2] = {
    {AArch64::SBFMWri, AArch64::SBFMXri},
    {AArch64::UBFMWri, AArch64::UBFMXri}
  };
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    Register TmpReg = MRI.createVirtualRegister(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(0)
        .addReg(Op0)
        .addImm(AArch64::sub_32);
    Op0 = TmpReg;
  }
  return fastEmitInst_rii(Opc, RC, Op0, ImmR, ImmS);
}

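// Worked example for emitLSR_ri (above), as a sketch: for
// "%2 = lshr i16 (zext i8 %x to i16), 4", ImmR = min(7, 4) = 4 and ImmS = 7,
// so the emitted UBFMWri is the alias "ubfx w0, w1, #4, #4"; the zero-extend
// is folded into the bitfield extract.
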
unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg,
                                     unsigned Op1Reg) {
  unsigned Opc = 0;
  bool NeedTrunc = false;
  uint64_t Mask = 0;
  switch (RetVT.SimpleTy) {
  default: return 0;
  case MVT::i8:  Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::ASRVWr; break;
  case MVT::i64: Opc = AArch64::ASRVXr; break;
  }

  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (NeedTrunc) {
    Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, /*isZExt=*/false);
    Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
  }
  Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
  if (NeedTrunc)
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
  return ResultReg;
}

unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                     uint64_t Shift, bool IsZExt) {
  assert(RetVT.SimpleTy >= SrcVT.SimpleTy &&
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) && "Unexpected return value type.");

  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
  unsigned DstBits = RetVT.getSizeInBits();
  unsigned SrcBits = SrcVT.getSizeInBits();
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

  // Just emit a copy for "zero" shifts.
  if (Shift == 0) {
    if (RetVT == SrcVT) {
      Register ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(Op0);
      return ResultReg;
    } else
      return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  }

  // Don't deal with undefined shifts.
  if (Shift >= DstBits)
    return 0;

  // For immediate shifts we can fold the zero-/sign-extension into the shift.
  // {S|U}BFM Wd, Wn, #r, #s
  // Wd<s-r:0> = Wn<s:r> when r <= s
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = ashr i16 %1, 4
  // Wd<7-4:0> = Wn<7:4>
  // 0b1111_1111_1111_1111__1111_1111_1111_1010 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0101 sext | zext
  // 0b0000_0000_0000_0000__0000_0000_0000_1010 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = ashr i16 %1, 8
  // Wd<7-7,0> = Wn<7:7>
  // 0b1111_1111_1111_1111__1111_1111_1111_1111 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext
  //
  // %1 = {s|z}ext i8 {0b1010_1010|0b0101_0101} to i16
  // %2 = ashr i16 %1, 12
  // Wd<7-7,0> = Wn<7:7> <- clamp r to 7
  // 0b1111_1111_1111_1111__1111_1111_1111_1111 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 sext
  // 0b0000_0000_0000_0000__0000_0000_0000_0000 zext

  if (Shift >= SrcBits && IsZExt)
    return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);

  unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
  unsigned ImmS = SrcBits - 1;
  static const unsigned OpcTable[2][2] = {
    {AArch64::SBFMWri, AArch64::SBFMXri},
    {AArch64::UBFMWri, AArch64::UBFMXri}
  };
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    Register TmpReg = MRI.createVirtualRegister(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(0)
        .addReg(Op0)
        .addImm(AArch64::sub_32);
    Op0 = TmpReg;
  }
  return fastEmitInst_rii(Opc, RC, Op0, ImmR, ImmS);
}

unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                     bool IsZExt) {
  assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?");

  // FastISel does not have plumbing to deal with extensions where the SrcVT or
  // DestVT are odd things, so test to make sure that they are both types we can
  // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
  // bail out to SelectionDAG.
  if (((DestVT != MVT::i8) && (DestVT != MVT::i16) &&
       (DestVT != MVT::i32) && (DestVT != MVT::i64)) ||
      ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) &&
       (SrcVT != MVT::i16) && (SrcVT != MVT::i32)))
    return 0;

  unsigned Opc;
  unsigned Imm = 0;

  switch (SrcVT.SimpleTy) {
  default:
    return 0;
  case MVT::i1:
    return emiti1Ext(SrcReg, DestVT, IsZExt);
  case MVT::i8:
    if (DestVT == MVT::i64)
      Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    else
      Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    Imm = 7;
    break;
  case MVT::i16:
    if (DestVT == MVT::i64)
      Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    else
      Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    Imm = 15;
    break;
  case MVT::i32:
    assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?");
    Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    Imm = 31;
    break;
  }

  // Handle i8 and i16 as i32.
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
    DestVT = MVT::i32;
  else if (DestVT == MVT::i64) {
    Register Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), Src64)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(AArch64::sub_32);
    SrcReg = Src64;
  }

  const TargetRegisterClass *RC =
      (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  return fastEmitInst_rii(Opc, RC, SrcReg, 0, Imm);
}

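// Illustrative mapping for emitIntExt (above): a zext of i8 to i32 becomes
// UBFMWri Wd, Wn, #0, #7 (the "uxtb" alias), a sext of i16 to i32 becomes
// SBFMWri Wd, Wn, #0, #15 ("sxth"), and extensions to i64 first move the
// 32-bit source into an X register with SUBREG_TO_REG before the 64-bit
// bitfield move.
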
static bool isZExtLoad(const MachineInstr *LI) {
  switch (LI->getOpcode()) {
  default:
    return false;
  case AArch64::LDURBBi:
  case AArch64::LDURHHi:
  case AArch64::LDURWi:
  case AArch64::LDRBBui:
  case AArch64::LDRHHui:
  case AArch64::LDRWui:
  case AArch64::LDRBBroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRWroX:
  case AArch64::LDRBBroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRWroW:
    return true;
  }
}

static bool isSExtLoad(const MachineInstr *LI) {
  switch (LI->getOpcode()) {
  default:
    return false;
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSWi:
  case AArch64::LDRSBWui:
  case AArch64::LDRSHWui:
  case AArch64::LDRSBXui:
  case AArch64::LDRSHXui:
  case AArch64::LDRSWui:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
    return true;
  }
}

bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
                                         MVT SrcVT) {
  const auto *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI || !LI->hasOneUse())
    return false;

  // Check if the load instruction has already been selected.
  Register Reg = lookUpRegForValue(LI);
  if (!Reg)
    return false;

  MachineInstr *MI = MRI.getUniqueVRegDef(Reg);
  if (!MI)
    return false;

  // Check if the correct load instruction has been emitted - SelectionDAG might
  // have emitted a zero-extending load, but we need a sign-extending load.
  bool IsZExt = isa<ZExtInst>(I);
  const auto *LoadMI = MI;
  if (LoadMI->getOpcode() == TargetOpcode::COPY &&
      LoadMI->getOperand(1).getSubReg() == AArch64::sub_32) {
    Register LoadReg = MI->getOperand(1).getReg();
    LoadMI = MRI.getUniqueVRegDef(LoadReg);
    assert(LoadMI && "Expected valid instruction");
  }
  if (!(IsZExt && isZExtLoad(LoadMI)) && !(!IsZExt && isSExtLoad(LoadMI)))
    return false;

  // Nothing to be done.
  if (RetVT != MVT::i64 || SrcVT > MVT::i32) {
    updateValueMap(I, Reg);
    return true;
  }

  if (IsZExt) {
    Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), Reg64)
        .addImm(0)
        .addReg(Reg, getKillRegState(true))
        .addImm(AArch64::sub_32);
    Reg = Reg64;
  } else {
    assert((MI->getOpcode() == TargetOpcode::COPY &&
            MI->getOperand(1).getSubReg() == AArch64::sub_32) &&
           "Expected copy instruction");
    Reg = MI->getOperand(1).getReg();
    MachineBasicBlock::iterator I(MI);
    removeDeadCode(I, std::next(I));
  }
  updateValueMap(I, Reg);
  return true;
}

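// Sketch of the optimization above: for "%v = load i8, ptr %p" followed by
// "%z = zext i8 %v to i64", the already-selected zero-extending load result is
// reused; the zext only needs a SUBREG_TO_REG wrapper (the 32-bit load has
// cleared the upper bits), and the sign-extend case reuses the 64-bit
// sign-extending load sitting under the COPY and deletes the dead copy.
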
bool AArch64FastISel::selectIntExt(const Instruction *I) {
  assert((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
         "Unexpected integer extend instruction.");
  MVT RetVT;
  MVT SrcVT;
  if (!isTypeSupported(I->getType(), RetVT))
    return false;

  if (!isTypeSupported(I->getOperand(0)->getType(), SrcVT))
    return false;

  // Try to optimize already sign-/zero-extended values from load instructions.
  if (optimizeIntExtLoad(I, RetVT, SrcVT))
    return true;

  Register SrcReg = getRegForValue(I->getOperand(0));
  if (!SrcReg)
    return false;

  // Try to optimize already sign-/zero-extended values from function arguments.
  bool IsZExt = isa<ZExtInst>(I);
  if (const auto *Arg = dyn_cast<Argument>(I->getOperand(0))) {
    if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr())) {
      if (RetVT == MVT::i64 && SrcVT != MVT::i64) {
        Register ResultReg = createResultReg(&AArch64::GPR64RegClass);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                TII.get(AArch64::SUBREG_TO_REG), ResultReg)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(AArch64::sub_32);
        SrcReg = ResultReg;
      }

      updateValueMap(I, SrcReg);
      return true;
    }
  }

  unsigned ResultReg = emitIntExt(SrcVT, SrcReg, RetVT, IsZExt);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::selectRem(const Instruction *I, unsigned ISDOpcode) {
  EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
  if (!DestEVT.isSimple())
    return false;

  MVT DestVT = DestEVT.getSimpleVT();
  if (DestVT != MVT::i64 && DestVT != MVT::i32)
    return false;

  unsigned DivOpc;
  bool Is64bit = (DestVT == MVT::i64);
  switch (ISDOpcode) {
  default:
    return false;
  case ISD::SREM:
    DivOpc = Is64bit ? AArch64::SDIVXr : AArch64::SDIVWr;
    break;
  case ISD::UREM:
    DivOpc = Is64bit ? AArch64::UDIVXr : AArch64::UDIVWr;
    break;
  }
  unsigned MSubOpc = Is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
  Register Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;

  Register Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src1Reg)
    return false;

  const TargetRegisterClass *RC =
      (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  Register QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, Src1Reg);
  assert(QuotReg && "Unexpected DIV instruction emission failure.");
  // The remainder is computed as numerator - (quotient * denominator) using the
  // MSUB instruction.
  Register ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, Src1Reg, Src0Reg);
  updateValueMap(I, ResultReg);
  return true;
}

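// Example of the remainder expansion above: "srem i32 %a, %b" becomes roughly
//   sdiv w8, w0, w1
//   msub w0, w8, w1, w0   ; w0 = w0 - (w8 * w1)
// i.e. numerator minus quotient times denominator.
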
bool AArch64FastISel::selectMul(const Instruction *I) {
  MVT VT;
  if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true))
    return false;

  if (VT.isVector())
    return selectBinaryOp(I, ISD::MUL);

  const Value *Src0 = I->getOperand(0);
  const Value *Src1 = I->getOperand(1);
  if (const auto *C = dyn_cast<ConstantInt>(Src0))
    if (C->getValue().isPowerOf2())
      std::swap(Src0, Src1);

  // Try to simplify to a shift instruction.
  if (const auto *C = dyn_cast<ConstantInt>(Src1))
    if (C->getValue().isPowerOf2()) {
      uint64_t ShiftVal = C->getValue().logBase2();
      MVT SrcVT = VT;
      bool IsZExt = true;
      if (const auto *ZExt = dyn_cast<ZExtInst>(Src0)) {
        if (!isIntExtFree(ZExt)) {
          MVT VT;
          if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), VT)) {
            SrcVT = VT;
            IsZExt = true;
            Src0 = ZExt->getOperand(0);
          }
        }
      } else if (const auto *SExt = dyn_cast<SExtInst>(Src0)) {
        if (!isIntExtFree(SExt)) {
          MVT VT;
          if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), VT)) {
            SrcVT = VT;
            IsZExt = false;
            Src0 = SExt->getOperand(0);
          }
        }
      }

      Register Src0Reg = getRegForValue(Src0);
      if (!Src0Reg)
        return false;

      unsigned ResultReg =
          emitLSL_ri(VT, SrcVT, Src0Reg, ShiftVal, IsZExt);
      if (!ResultReg)
        return false;

      updateValueMap(I, ResultReg);
      return true;
    }

  Register Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;

  Register Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src1Reg)
    return false;

  unsigned ResultReg = emitMul_rr(VT, Src0Reg, Src1Reg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

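// Example of the power-of-two fast path in selectMul (above): "mul i32 %x, 8"
// is rewritten as a left shift through emitLSL_ri, ending up as roughly
// "lsl w0, w0, #3" instead of a materialized constant plus madd.
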
bool AArch64FastISel::selectShift(const Instruction *I) {
  MVT RetVT;
  if (!isTypeSupported(I->getType(), RetVT, /*IsVectorAllowed=*/true))
    return false;

  if (RetVT.isVector())
    return selectOperator(I, I->getOpcode());

  if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = 0;
    uint64_t ShiftVal = C->getZExtValue();
    MVT SrcVT = RetVT;
    bool IsZExt = I->getOpcode() != Instruction::AShr;
    const Value *Op0 = I->getOperand(0);
    if (const auto *ZExt = dyn_cast<ZExtInst>(Op0)) {
      if (!isIntExtFree(ZExt)) {
        MVT TmpVT;
        if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), TmpVT)) {
          SrcVT = TmpVT;
          IsZExt = true;
          Op0 = ZExt->getOperand(0);
        }
      }
    } else if (const auto *SExt = dyn_cast<SExtInst>(Op0)) {
      if (!isIntExtFree(SExt)) {
        MVT TmpVT;
        if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), TmpVT)) {
          SrcVT = TmpVT;
          IsZExt = false;
          Op0 = SExt->getOperand(0);
        }
      }
    }

    Register Op0Reg = getRegForValue(Op0);
    if (!Op0Reg)
      return false;

    switch (I->getOpcode()) {
    default: llvm_unreachable("Unexpected instruction.");
    case Instruction::Shl:
      ResultReg = emitLSL_ri(RetVT, SrcVT, Op0Reg, ShiftVal, IsZExt);
      break;
    case Instruction::AShr:
      ResultReg = emitASR_ri(RetVT, SrcVT, Op0Reg, ShiftVal, IsZExt);
      break;
    case Instruction::LShr:
      ResultReg = emitLSR_ri(RetVT, SrcVT, Op0Reg, ShiftVal, IsZExt);
      break;
    }
    if (!ResultReg)
      return false;

    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op0Reg = getRegForValue(I->getOperand(0));
  if (!Op0Reg)
    return false;

  Register Op1Reg = getRegForValue(I->getOperand(1));
  if (!Op1Reg)
    return false;

  unsigned ResultReg = 0;
  switch (I->getOpcode()) {
  default: llvm_unreachable("Unexpected instruction.");
  case Instruction::Shl:
    ResultReg = emitLSL_rr(RetVT, Op0Reg, Op1Reg);
    break;
  case Instruction::AShr:
    ResultReg = emitASR_rr(RetVT, Op0Reg, Op1Reg);
    break;
  case Instruction::LShr:
    ResultReg = emitLSR_rr(RetVT, Op0Reg, Op1Reg);
    break;
  }

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool AArch64FastISel::selectBitCast(const Instruction *I) {
  MVT RetVT;
  MVT SrcVT;

  if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT))
    return false;
  if (!isTypeLegal(I->getType(), RetVT))
    return false;

  unsigned Opc;
  if (RetVT == MVT::f32 && SrcVT == MVT::i32)
    Opc = AArch64::FMOVWSr;
  else if (RetVT == MVT::f64 && SrcVT == MVT::i64)
    Opc = AArch64::FMOVXDr;
  else if (RetVT == MVT::i32 && SrcVT == MVT::f32)
    Opc = AArch64::FMOVSWr;
  else if (RetVT == MVT::i64 && SrcVT == MVT::f64)
    Opc = AArch64::FMOVDXr;
  else
    return false;

  const TargetRegisterClass *RC = nullptr;
  switch (RetVT.SimpleTy) {
  default: llvm_unreachable("Unexpected value type.");
  case MVT::i32: RC = &AArch64::GPR32RegClass; break;
  case MVT::i64: RC = &AArch64::GPR64RegClass; break;
  case MVT::f32: RC = &AArch64::FPR32RegClass; break;
  case MVT::f64: RC = &AArch64::FPR64RegClass; break;
  }
  Register Op0Reg = getRegForValue(I->getOperand(0));
  if (!Op0Reg)
    return false;

  Register ResultReg = fastEmitInst_r(Opc, RC, Op0Reg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

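// Sketch of selectBitCast (above): only same-sized int/FP casts are handled,
// each mapping to a single FMOV, e.g. "bitcast i32 %x to float" becomes
// "fmov s0, w0" and "bitcast double %d to i64" becomes "fmov x0, d0".
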
bool AArch64FastISel::selectFRem(const Instruction *I) {
  MVT RetVT;
  if (!isTypeLegal(I->getType(), RetVT))
    return false;

  RTLIB::Libcall LC;
  switch (RetVT.SimpleTy) {
  default:
    return false;
  case MVT::f32:
    LC = RTLIB::REM_F32;
    break;
  case MVT::f64:
    LC = RTLIB::REM_F64;
    break;
  }

  ArgListTy Args;
  Args.reserve(I->getNumOperands());

  // Populate the argument list.
  for (auto &Arg : I->operands()) {
    ArgListEntry Entry;
    Entry.Val = Arg;
    Entry.Ty = Arg->getType();
    Args.push_back(Entry);
  }

  CallLoweringInfo CLI;
  MCContext &Ctx = MF->getContext();
  CLI.setCallee(DL, Ctx, TLI.getLibcallCallingConv(LC), I->getType(),
                TLI.getLibcallName(LC), std::move(Args));
  if (!lowerCallTo(CLI))
    return false;
  updateValueMap(I, CLI.ResultReg);
  return true;
}

bool AArch64FastISel::selectSDiv(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  if (!isa<ConstantInt>(I->getOperand(1)))
    return selectBinaryOp(I, ISD::SDIV);

  const APInt &C = cast<ConstantInt>(I->getOperand(1))->getValue();
  if ((VT != MVT::i32 && VT != MVT::i64) || !C ||
      !(C.isPowerOf2() || C.isNegatedPowerOf2()))
    return selectBinaryOp(I, ISD::SDIV);

  unsigned Lg2 = C.countr_zero();
  Register Src0Reg = getRegForValue(I->getOperand(0));
  if (!Src0Reg)
    return false;

  if (cast<BinaryOperator>(I)->isExact()) {
    unsigned ResultReg = emitASR_ri(VT, VT, Src0Reg, Lg2);
    if (!ResultReg)
      return false;
    updateValueMap(I, ResultReg);
    return true;
  }

  int64_t Pow2MinusOne = (1ULL << Lg2) - 1;
  unsigned AddReg = emitAdd_ri_(VT, Src0Reg, Pow2MinusOne);
  if (!AddReg)
    return false;

  // (Src0 < 0) ? Pow2 - 1 : 0;
  if (!emitICmp_ri(VT, Src0Reg, 0))
    return false;

  unsigned SelectOpc;
  const TargetRegisterClass *RC;
  if (VT == MVT::i64) {
    SelectOpc = AArch64::CSELXr;
    RC = &AArch64::GPR64RegClass;
  } else {
    SelectOpc = AArch64::CSELWr;
    RC = &AArch64::GPR32RegClass;
  }
  Register SelectReg = fastEmitInst_rri(SelectOpc, RC, AddReg, Src0Reg,
                                        AArch64CC::LT);
  if (!SelectReg)
    return false;

  // Divide by Pow2 --> ashr. If we're dividing by a negative value we must also
  // negate the result.
  unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
  unsigned ResultReg;
  if (C.isNegative())
    ResultReg = emitAddSub_rs(/*UseAdd=*/false, VT, ZeroReg, SelectReg,
                              AArch64_AM::ASR, Lg2);
  else
    ResultReg = emitASR_ri(VT, VT, SelectReg, Lg2);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

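// Worked example for the power-of-two path in selectSDiv (above): for
// "sdiv i32 %x, 8" (not marked exact) the emitted sequence is roughly
//   add  w8, w0, #7
//   cmp  w0, #0
//   csel w8, w8, w0, lt
//   asr  w0, w8, #3
// which rounds toward zero; a negative power-of-two divisor additionally
// negates the shifted result via a SUB from the zero register.
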
/// This is mostly a copy of the existing FastISel getRegForGEPIndex code. We
/// have to duplicate it for AArch64, because otherwise we would fail during the
/// sign-extend emission.
unsigned AArch64FastISel::getRegForGEPIndex(const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return 0;

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = emitIntExt(IdxVT.getSimpleVT(), IdxN, PtrVT, /*isZExt=*/false);
  } else if (IdxVT.bitsGT(PtrVT))
    llvm_unreachable("AArch64 FastISel doesn't support types larger than i64");
  return IdxN;
}

/// This is mostly a copy of the existing FastISel GEP code, but we have to
/// duplicate it for AArch64, because otherwise we would bail out even for
/// simple cases. This is because the standard fastEmit functions don't cover
/// MUL at all and ADD is lowered very inefficiently.
bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
  if (Subtarget->isTargetILP32())
    return false;

  Register N = getRegForValue(I->getOperand(0));
  if (!N)
    return false;

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (auto *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      // N = N + Offset
      if (Field)
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
    } else {
      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        TotalOffs += GTI.getSequentialElementStride(DL) *
                     cast<ConstantInt>(CI)->getSExtValue();
        continue;
      }
      if (TotalOffs) {
        N = emitAdd_ri_(VT, N, TotalOffs);
        if (!N)
          return false;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = GTI.getSequentialElementStride(DL);
      unsigned IdxN = getRegForGEPIndex(Idx);
      if (!IdxN)
        return false;

      if (ElementSize != 1) {
        unsigned C = fastEmit_i(VT, VT, ISD::Constant, ElementSize);
        if (!C)
          return false;
        IdxN = emitMul_rr(VT, IdxN, C);
        if (!IdxN)
          return false;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (!N)
        return false;
    }
  }
  if (TotalOffs) {
    N = emitAdd_ri_(VT, N, TotalOffs);
    if (!N)
      return false;
  }
  updateValueMap(I, N);
  return true;
}

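// Note on selectGetElementPtr (above), as a sketch: constant indices are
// accumulated into TotalOffs and emitted as a single add at the end, while a
// variable index is scaled by the element size through emitMul_rr (a constant
// materialization plus a madd against the zero register) and then added to
// the running pointer value.
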
bool AArch64FastISel::selectAtomicCmpXchg(const AtomicCmpXchgInst *I) {
  assert(TM.getOptLevel() == CodeGenOptLevel::None &&
         "cmpxchg survived AtomicExpand at optlevel > -O0");

  auto *RetPairTy = cast<StructType>(I->getType());
  Type *RetTy = RetPairTy->getTypeAtIndex(0U);
  assert(RetPairTy->getTypeAtIndex(1U)->isIntegerTy(1) &&
         "cmpxchg has a non-i1 status result");

  MVT VT;
  if (!isTypeLegal(RetTy, VT))
    return false;

  const TargetRegisterClass *ResRC;
  unsigned Opc, CmpOpc;
  // This only supports i32/i64, because i8/i16 aren't legal, and the generic
  // extractvalue selection doesn't support that.
  if (VT == MVT::i32) {
    Opc = AArch64::CMP_SWAP_32;
    CmpOpc = AArch64::SUBSWrs;
    ResRC = &AArch64::GPR32RegClass;
  } else if (VT == MVT::i64) {
    Opc = AArch64::CMP_SWAP_64;
    CmpOpc = AArch64::SUBSXrs;
    ResRC = &AArch64::GPR64RegClass;
  } else {
    return false;
  }

  const MCInstrDesc &II = TII.get(Opc);

  const Register AddrReg = constrainOperandRegClass(
      II, getRegForValue(I->getPointerOperand()), II.getNumDefs());
  const Register DesiredReg = constrainOperandRegClass(
      II, getRegForValue(I->getCompareOperand()), II.getNumDefs() + 1);
  const Register NewReg = constrainOperandRegClass(
      II, getRegForValue(I->getNewValOperand()), II.getNumDefs() + 2);

  const Register ResultReg1 = createResultReg(ResRC);
  const Register ResultReg2 = createResultReg(&AArch64::GPR32RegClass);
  const Register ScratchReg = createResultReg(&AArch64::GPR32RegClass);

  // FIXME: MachineMemOperand doesn't support cmpxchg yet.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
      .addDef(ResultReg1)
      .addDef(ScratchReg)
      .addUse(AddrReg)
      .addUse(DesiredReg)
      .addUse(NewReg);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CmpOpc))
      .addDef(VT == MVT::i32 ? AArch64::WZR : AArch64::XZR)
      .addUse(ResultReg1)
      .addUse(DesiredReg)
      .addImm(0);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr))
      .addDef(ResultReg2)
      .addUse(AArch64::WZR)
      .addUse(AArch64::WZR)
      .addImm(AArch64CC::NE);

  assert((ResultReg1 + 1) == ResultReg2 && "Nonconsecutive result registers.");
  updateValueMap(I, ResultReg1, 2);
  return true;
}

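// Note on selectAtomicCmpXchg (above): CMP_SWAP_32/64 is a pseudo that the
// backend later expands into an exclusive load/store loop; the i1 success
// value is computed here as a SUBS of the loaded value against the expected
// one followed by "csinc wN, wzr, wzr, ne", which yields 1 exactly when the
// comparison succeeded.
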
bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
  if (TLI.fallBackToDAGISel(*I))
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Add:
  case Instruction::Sub:
    return selectAddSub(I);
  case Instruction::Mul:
    return selectMul(I);
  case Instruction::SDiv:
    return selectSDiv(I);
  case Instruction::SRem:
    if (!selectBinaryOp(I, ISD::SREM))
      return selectRem(I, ISD::SREM);
    return true;
  case Instruction::URem:
    if (!selectBinaryOp(I, ISD::UREM))
      return selectRem(I, ISD::UREM);
    return true;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return selectShift(I);
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return selectLogicalOp(I);
  case Instruction::Br:
    return selectBranch(I);
  case Instruction::IndirectBr:
    return selectIndirectBr(I);
  case Instruction::BitCast:
    if (!FastISel::selectBitCast(I))
      return selectBitCast(I);
    return true;
  case Instruction::FPToSI:
    if (!selectCast(I, ISD::FP_TO_SINT))
      return selectFPToInt(I, /*Signed=*/true);
    return true;
  case Instruction::FPToUI:
    return selectFPToInt(I, /*Signed=*/false);
  case Instruction::ZExt:
  case Instruction::SExt:
    return selectIntExt(I);
  case Instruction::Trunc:
    if (!selectCast(I, ISD::TRUNCATE))
      return selectTrunc(I);
    return true;
  case Instruction::FPExt:
    return selectFPExt(I);
  case Instruction::FPTrunc:
    return selectFPTrunc(I);
  case Instruction::SIToFP:
    if (!selectCast(I, ISD::SINT_TO_FP))
      return selectIntToFP(I, /*Signed=*/true);
    return true;
  case Instruction::UIToFP:
    return selectIntToFP(I, /*Signed=*/false);
  case Instruction::Load:
    return selectLoad(I);
  case Instruction::Store:
    return selectStore(I);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return selectCmp(I);
  case Instruction::Select:
    return selectSelect(I);
  case Instruction::Ret:
    return selectRet(I);
  case Instruction::FRem:
    return selectFRem(I);
  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);
  case Instruction::AtomicCmpXchg:
    return selectAtomicCmpXchg(cast<AtomicCmpXchgInst>(I));
  }

  // Fall back to target-independent instruction selection.
  return selectOperator(I, I->getOpcode());
}

FastISel *AArch64::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) {

  SMEAttrs CallerAttrs(*FuncInfo.Fn);
  if (CallerAttrs.hasZAState() || CallerAttrs.hasZT0State() ||
      CallerAttrs.hasStreamingInterfaceOrBody() ||
      CallerAttrs.hasStreamingCompatibleInterface())
    return nullptr;
  return new AArch64FastISel(FuncInfo, LibInfo);
}