//===- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in '-O0' compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

void FastISel::finishBasicBlock() { flushLocalValueMap(); }

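/// Lower the incoming arguments of the current function's entry block via the
/// target's fastLowerArguments hook; returns false so the caller can fall back
/// to the SelectionDAG argument lowering path when the signature cannot be
/// handled here.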
bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
static Register findLocalRegDef(MachineInstr &MI) {
  Register RegDef;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return Register();
      RegDef = MO.getReg();
    } else if (MO.getReg().isVirtual()) {
      // This is another use of a vreg. Don't delete it.
      return Register();
    }
  }
  return RegDef;
}

static bool isRegUsedByPhiNodes(Register DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

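/// Flush the local value map: erase any dead local-value instructions that a
/// failed selection left behind, and reset the local-value insertion state for
/// the next IR instruction.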
void FastISel::flushLocalValueMap() {
  // If FastISel bails out, it could leave local value instructions behind
  // that aren't used for anything. Detect and erase those.
  if (LastLocalValue != EmitStartPt) {
    // Save the first instruction after local values, for later.
    MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
    ++FirstNonValue;

    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);
    for (MachineInstr &LocalMI :
         llvm::make_early_inc_range(llvm::make_range(RI, RE))) {
      Register DefReg = findLocalRegDef(LocalMI);
      if (!DefReg)
        continue;
      if (FuncInfo.RegsWithFixups.count(DefReg))
        continue;
      bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
      if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
        if (EmitStartPt == &LocalMI)
          EmitStartPt = EmitStartPt->getPrevNode();
        LLVM_DEBUG(dbgs() << "removing dead local value materialization"
                          << LocalMI);
        LocalMI.eraseFromParent();
      }
    }

    if (FirstNonValue != FuncInfo.MBB->end()) {
      // See if there are any local value instructions left. If so, we want to
      // make sure the first one has a debug location; if it doesn't, use the
      // first non-value instruction's debug location.

      // If EmitStartPt is non-null, this block had copies at the top before
      // FastISel started doing anything; it points to the last one, so the
      // first local value instruction is the one after EmitStartPt.
      // If EmitStartPt is null, the first local value instruction is at the
      // top of the block.
      MachineBasicBlock::iterator FirstLocalValue =
          EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
                      : FuncInfo.MBB->begin();
      if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
        FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

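/// Create or reuse a virtual register for the given IR value, materializing
/// constants and other non-instruction values in the local value area when
/// needed.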
Register FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return Register();

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return Register();
  }

  // Look up the value to see if we already have a register for it.
  Register Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

Register FastISel::materializeConstant(const Value *V, MVT VT) {
  Register Reg;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
        getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        Register IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
  Register Reg;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

Register FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

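/// Record that the IR value \p I is available in \p Reg. If the value already
/// had a register assigned (e.g. for cross-block uses), arrange for the old
/// registers to be replaced by the new ones via RegFixups.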
void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  Register &AssignedReg = FuncInfo.ValueMap[I];
  if (!AssignedReg)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}

Register FastISel::getRegForGEPIndex(const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return Register();

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  }
  return IdxN;
}

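/// Reset the current insert point to just after the last local value
/// instruction (or to the start of the block), skipping any EH_LABELs that
/// must stay at the beginning.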
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

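/// Save the current insert point and switch to inserting at the end of the
/// local value area; leaveLocalValueArea() restores the saved position.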
FastISel::SavePoint FastISel::enterLocalValueArea() {
  SavePoint OldInsertPt = FuncInfo.InsertPt;
  recomputeInsertPt();
  return OldInsertPt;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      Register Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;

      Register ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
                       VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
                                      VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

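/// Select a getelementptr by folding constant indices into a running byte
/// offset (flushed once it exceeds MaxOffs) and emitting MUL/ADD instructions
/// for variable indices.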
bool FastISel::selectGetElementPtr(const User *I) {
  Register N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
  // and bail.
  if (isa<VectorType>(I->getType()))
    return false;

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      Register IdxN = getRegForGEPIndex(Idx);
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      Register Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap doesn't
  // clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

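/// Lower llvm.experimental.patchpoint.* by first lowering the underlying call
/// (skipped for the anyreg calling convention) and then wrapping it in a
/// PATCHPOINT pseudo-instruction carrying the stackmap operands.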
bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC)
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      Register Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, that gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, that gets lowered
  // properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  FunctionType *FTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);

  return lowerCallTo(CLI);
}

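/// Finish lowering a call described by \p CLI: compute return-value and
/// argument flags, delegate to the target's fastLowerCall hook, and record the
/// result registers in the value map.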
bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = Arg.IndirectType;
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg, DL);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftAsync)
      Flags.setSwiftAsync();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsCFGuardTarget)
      Flags.setCFGuardTarget();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling in
      // the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsPreallocated) {
      Flags.setPreallocated();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // preallocated. This way we can know how many bytes we should've
      // allocated and how many bytes a callee cleanup function will pop. If we
      // port preallocated to more targets, we'll have to add custom
      // preallocated handling in the various CC lowering callbacks.
      Flags.setByVal();
    }
    MaybeAlign MemAlign = Arg.Alignment;
    if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
      unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);

      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      if (!MemAlign)
        MemAlign = Align(TLI.getByValTypeAlignment(Arg.IndirectType, DL));
      Flags.setByValSize(FrameSize);
    } else if (!MemAlign) {
      MemAlign = DL.getABITypeAlign(Arg.Ty);
    }
    Flags.setMemAlign(*MemAlign);
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CB)
    updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CB)
    if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
      CLI.Call->setHeapAllocMarker(*MF, MD);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  FunctionType *FuncTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CI->arg_size());

  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(CI, i - CI->arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(*CI, TM))
    IsTailCall = false;
  if (IsTailCall && MF->getFunction()
                        .getFnAttribute("disable-tail-calls")
                        .getValueAsBool())
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
      .setTailCall(IsTailCall);

  diagnoseDontCall(*CI);

  return lowerCallTo(CLI);
}

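/// Select a call instruction: handle trivial inline asm directly, dispatch
/// intrinsics to selectIntrinsicCall, and lower everything else as a normal
/// call.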
bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;
    if (Call->isConvergent())
      ExtraInfo |= InlineAsm::Extra_IsConvergent;
    ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                      TII.get(TargetOpcode::INLINEASM));
    MIB.addExternalSymbol(IA->getAsmString().c_str());
    MIB.addImm(ExtraInfo);

    const MDNode *SrcLoc = Call->getMetadata("srcloc");
    if (SrcLoc)
      MIB.addMetadata(SrcLoc);

    return true;
  }

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  return lowerCall(Call);
}

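/// Select an intrinsic call. Debug intrinsics are lowered to DBG_* pseudo
/// instructions here; anything not handled below is passed to the target's
/// fastLowerIntrinsicCall hook.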
bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
  case Intrinsic::assume:
  // Neither does the llvm.experimental.noalias.scope.decl intrinsic
  case Intrinsic::experimental_noalias_scope_decl:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (!hasDebugInfo)\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (bad/undef address)\n");
      return true;
    }

    // Byval arguments with frame indices were already handled after argument
    // lowering and before isel.
    const auto *Arg =
        dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
    if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
      return true;

    Optional<MachineOperand> Op;
    if (Register Reg = lookUpRegForValue(Address))
      Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
             "Expected inlined-at fields to agree");
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      auto Builder =
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                  TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op,
                  DI->getVariable(), DI->getExpression());

      // If using instruction referencing, mutate this into a DBG_INSTR_REF,
      // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
      // the expression, we don't have an "indirect" flag in DBG_INSTR_REF.
      if (FuncInfo.MF->useDebugInstrRef() && Op->isReg()) {
        Builder->setDesc(TII.get(TargetOpcode::DBG_INSTR_REF));
        Builder->getOperand(1).ChangeToImmediate(0);
        auto *NewExpr =
            DIExpression::prepend(DI->getExpression(), DIExpression::DerefBefore);
        Builder->getOperand(3).setMetadata(NewExpr);
      }
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (no materialized reg for address)\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    if (!V || isa<UndefValue>(V) || DI->hasArgList()) {
      // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
      // undef DBG_VALUE to terminate any prior location.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
              DI->getVariable(), DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      // See if there's an expression to constant-fold.
      DIExpression *Expr = DI->getExpression();
      if (Expr)
        std::tie(Expr, CI) = Expr->constantFold(CI);
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(Expr);
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(Expr);
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(0U)
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (Register Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = false;
      auto Builder =
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
                  DI->getVariable(), DI->getExpression());

      // If using instruction referencing, mutate this into a DBG_INSTR_REF,
      // to be later patched up by finalizeDebugInstrRefs.
      if (FuncInfo.MF->useDebugInstrRef()) {
        Builder->setDesc(TII.get(TargetOpcode::DBG_INSTR_REF));
        Builder->getOperand(1).ChangeToImmediate(0);
      }
    } else {
      // We don't know how to handle other cases, so we drop.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst *DI = cast<DbgLabelInst>(II);
    assert(DI->getLabel() && "Missing label");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::expect: {
    Register ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  case Intrinsic::xray_typedevent:
    return selectXRayTypedEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  Register InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    Register Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // First, try to perform the bitcast by inserting a reg-reg copy.
  Register ResultReg;
  if (SrcVT == DstVT) {
    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectFreeze(const User *I) {
  Register Reg = getRegForValue(I->getOperand(0));
  if (!Reg)
    // Unhandled operand.
    return false;

  EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
  if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
    // Unhandled type, bail out.
    return false;

  MVT Ty = ETy.getSimpleVT();
  const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
  Register ResultReg = createResultReg(TyRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
{
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

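/// Select a single IR instruction, trying target-independent selection first
/// and then the target-specific fast-isel hook; returns false (after removing
/// any dead code that was emitted) when both fail, so the caller can fall back
/// to SelectionDAG.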
bool FastISel::selectInstruction(const Instruction *I) {
  // Flush the local value map before starting each instruction.
  // This improves locality and debugging, and can reduce spills.
  // Reuse of values across IR instructions is relatively uncommon.
  flushLocalValueMap();

  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (I->isTerminator()) {
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
  if (auto *Call = dyn_cast<CallBase>(I))
    for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
      if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  DbgLoc = I->getDebugLoc();

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (I->isTerminator()) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information if this is the only non-debug
    // instruction in the block then emit it, otherwise we have the
    // unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
  // happen in degenerate IR and MachineIR forbids to have a block twice in the
  // successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, DbgLoc);
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I, const Value *In) {
  Register OpReg = getRegForValue(In);
  if (!OpReg)
    return false;

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg);
  if (!IntReg)
    return false;

  Register IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

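/// Dispatch on the IR opcode and try to select the instruction using the
/// target-independent FastISel helpers. Returning false means the opcode is
/// not handled here and the caller must fall back to target-specific or
/// SelectionDAG selection.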
bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::FNeg:
    return selectFNeg(I, I->getOperand(0));

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    // On AIX, normal call lowering uses the DAG-ISEL path currently so that the
    // callee of the direct function call instruction will be mapped to the
    // symbol for the function's entry point, which is distinct from the
    // function descriptor symbol. The latter is the symbol whose XCOFF symbol
    // name is the C-linkage name of the source level function.
    // But fast isel still has the ability to do selection for intrinsics.
    if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
      return false;
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    Register Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::Freeze:
    return selectFreeze(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

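// The constructor caches the per-function and per-subtarget objects
// (instruction info, lowering info, register info) from the MachineFunction so
// the selection routines above can reach them without repeated lookups.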
FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel),
      LastLocalValue(nullptr), EmitStartPt(nullptr) {}

FastISel::~FastISel() = default;

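// Default implementations of the target hooks. A target's FastISel subclass
// overrides the entry points it supports; returning false (or the zero
// register) means "not handled", which causes selection of that instruction to
// fall back to SelectionDAG.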
bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}

/// This method is a wrapper of fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
  if (ResultReg)
    return ResultReg;
  Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

Register FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

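/// Constrain the register class of Op so that it is legal for operand OpNum of
/// the instruction described by II. If the constraint cannot be applied in
/// place, emit a COPY into a fresh register of the required class and return
/// that register instead.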
Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
                                            unsigned OpNum) {
  if (Op.isVirtual()) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      Register NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

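// The fastEmitInst_* helpers below all follow the same pattern: create a
// result register in RC, constrain any register operands to the classes
// required by the MCInstrDesc, and build the MachineInstr at the current
// insertion point. The suffix names the operand kinds: 'r' = register,
// 'i' = immediate, 'f' = floating-point immediate.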
Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   unsigned Op1) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addReg(Op1);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    unsigned Op1, unsigned Op2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    uint64_t Imm1, uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    unsigned Op1, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              uint32_t Idx) {
  Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(Register::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, 0, Idx);
  return ResultReg;
}

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const Instruction *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (const PHINode &PN : SuccBB->phis()) {
      // Ignore dead phi's.
      if (PN.use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Use the location of the operand if
      // there is one; otherwise no location, flushLocalValueMap will fix it.
      DbgLoc = DebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      Register Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}

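/// Try to fold the single-use load LI into the instruction being selected for
/// FoldInst, by walking the single-use chain between them and then asking the
/// target via tryToFoldLoadIntoMI.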
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  Register LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

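/// Return true if Add is an add with a constant right-hand side that could be
/// folded into GEP addressing: it must have the same bit width as the GEP and
/// be defined in the current basic block.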
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}

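/// Build a MachineMemOperand describing the memory access performed by I (a
/// load or store), carrying over alignment, volatility, and the nontemporal,
/// invariant.load, dereferenceable, range, and alias metadata.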
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  MaybeAlign Alignment;
  MachineMemOperand::Flags Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlign();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlign();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
  bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
  bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo = I->getAAMetadata();

  if (!Alignment) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlign(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsDereferenceable)
    Flags |= MachineMemOperand::MODereferenceable;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           *Alignment, AAInfo, Ranges);
}

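/// If both operands of the compare are identical, fold the predicate to one
/// that depends only on whether the value is ordered: e.g. "x == x" becomes
/// FCMP_ORD and "x < x" becomes FCMP_FALSE. Integer predicates fold to the
/// always-true or always-false FCMP_TRUE/FCMP_FALSE encodings.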
CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;