//===- Instructions.cpp - Implement the LLVM instructions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//
14 #include "llvm/IR/Instructions.h"
15 #include "LLVMContextImpl.h"
16 #include "llvm/ADT/SmallBitVector.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/IR/Attributes.h"
20 #include "llvm/IR/BasicBlock.h"
21 #include "llvm/IR/Constant.h"
22 #include "llvm/IR/Constants.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/Function.h"
26 #include "llvm/IR/InstrTypes.h"
27 #include "llvm/IR/Instruction.h"
28 #include "llvm/IR/Intrinsics.h"
29 #include "llvm/IR/LLVMContext.h"
30 #include "llvm/IR/MDBuilder.h"
31 #include "llvm/IR/Metadata.h"
32 #include "llvm/IR/Module.h"
33 #include "llvm/IR/Operator.h"
34 #include "llvm/IR/ProfDataUtils.h"
35 #include "llvm/IR/Type.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/AtomicOrdering.h"
38 #include "llvm/Support/Casting.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Support/ModRef.h"
42 #include "llvm/Support/TypeSize.h"
static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
//===----------------------------------------------------------------------===//
//                            AllocaInst Class
//===----------------------------------------------------------------------===//
std::optional<TypeSize>
AllocaInst::getAllocationSize(const DataLayout &DL) const {
  TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
  if (isArrayAllocation()) {
    auto *C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return std::nullopt;
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
    Size *= C->getZExtValue();
  }
  return Size;
}

std::optional<TypeSize>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  std::optional<TypeSize> Size = getAllocationSize(DL);
  if (Size)
    return *Size * 8;
  return std::nullopt;
}
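
// Illustrative sketch (not part of the original file): one way a client could
// use getAllocationSize() to accumulate fixed-size stack usage; `AI` (an
// AllocaInst *) and `DL` are assumed to be provided by the surrounding code.
//
//   uint64_t StackBytes = 0;
//   if (std::optional<TypeSize> Size = AI->getAllocationSize(DL))
//     if (!Size->isScalable())
//       StackBytes += Size->getFixedValue();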
//===----------------------------------------------------------------------===//
//                              SelectInst Class
//===----------------------------------------------------------------------===//
/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getElementCount() != VT->getElementCount())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//
PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
  SubclassOptionalData = PN.SubclassOptionalData;
}
// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  copyIncomingBlocks(drop_begin(blocks(), Idx + 1), Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}
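
// Illustrative sketch (assumption): dropping the entry for a deleted
// predecessor `DeadBB` from a phi `PN`:
//
//   int Idx = PN->getBasicBlockIndex(DeadBB);
//   if (Idx >= 0)
//     PN->removeIncomingValue(Idx, /*DeletePHIIfEmpty=*/true);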
void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
                                    bool DeletePHIIfEmpty) {
  SmallDenseSet<unsigned> RemoveIndices;
  for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
    if (Predicate(Idx))
      RemoveIndices.insert(Idx);

  if (RemoveIndices.empty())
    return;

  // Remove operands.
  auto NewOpEnd = remove_if(operands(), [&](Use &U) {
    return RemoveIndices.contains(U.getOperandNo());
  });
  for (Use &U : make_range(NewOpEnd, op_end()))
    U.set(nullptr);

  // Remove incoming blocks.
  (void)std::remove_if(const_cast<block_iterator>(block_begin()),
                       const_cast<block_iterator>(block_end()),
                       [&](BasicBlock *&BB) {
                         return RemoveIndices.contains(&BB - block_begin());
                       });

  setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
}
/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 1.5
/// times.
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}
/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}
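
// Illustrative sketch (assumption): a simplification pass can fold a phi whose
// incoming values all agree, using hasConstantValue():
//
//   if (Value *V = PN->hasConstantValue()) {
//     PN->replaceAllUsesWith(V);
//     PN->eraseFromParent();
//   }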
/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}
//===----------------------------------------------------------------------===//
//                       LandingPadInst Implementation
//===----------------------------------------------------------------------===//
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation.  This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}
//===----------------------------------------------------------------------===//
//                        CallBase Implementation
//===----------------------------------------------------------------------===//

CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           Instruction *InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}

CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
                           Instruction *InsertPt) {
  SmallVector<OperandBundleDef, 2> OpDefs;
  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
    auto ChildOB = CI->getOperandBundleAt(i);
    if (ChildOB.getTagName() != OpB.getTag())
      OpDefs.emplace_back(ChildOB);
  }
  OpDefs.emplace_back(OpB);
  return CallBase::Create(CI, OpDefs, InsertPt);
}

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledOperand();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  return !isInlineAsm();
}
/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}
FPClassTest CallBase::getRetNoFPClass() const {
  FPClassTest Mask = Attrs.getRetNoFPClass();

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getRetNoFPClass();
  return Mask;
}

FPClassTest CallBase::getParamNoFPClass(unsigned i) const {
  FPClassTest Mask = Attrs.getParamNoFPClass(i);

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getParamNoFPClass(i);
  return Mask;
}
bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getRetDereferenceableBytes() > 0 &&
      !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
    return true;

  return false;
}

Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Kind, &Index))
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}
/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  const Function *F = getCalledFunction();
  if (!F)
    return false;

  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles.
  switch (Kind) {
  case Attribute::ReadNone:
    return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
  case Attribute::ReadOnly:
    return !hasClobberingOperandBundles();
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}
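
// Illustrative sketch (assumption): a client query; note that, as implemented
// above, this merges the call-site attribute, the callee's attribute, and the
// conservative effect of any operand bundles:
//
//   if (CB->paramHasAttr(0, Attribute::ReadOnly))
//     ; // This call does not write through its first pointer argument.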
bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}

template Attribute
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}
CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}
CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, we do a simple linear search.
  /// Otherwise, fall back to a binary search that uses the fact that bundles
  /// usually have a similar number of arguments to get faster convergence.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need a decimal number below, and to avoid using floating point numbers
  /// we use an integral value multiplied by this constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current = Begin;

  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");

  return *Current;
}
CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
                                     OperandBundleDef OB,
                                     Instruction *InsertPt) {
  if (CB->getOperandBundle(ID))
    return CB;

  SmallVector<OperandBundleDef, 1> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.push_back(OB);
  return Create(CB, Bundles, InsertPt);
}

CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
                                        Instruction *InsertPt) {
  SmallVector<OperandBundleDef, 1> Bundles;
  bool CreateNew = false;

  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CB->getOperandBundleAt(I);
    if (Bundle.getTagID() == ID) {
      CreateNew = true;
      continue;
    }
    Bundles.emplace_back(Bundle);
  }

  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
}
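
// Illustrative sketch (assumption): attaching a "deopt" bundle to a call site.
// Both helpers return a *new* instruction when they change anything, so the
// caller must splice out the old one:
//
//   OperandBundleDef OB("deopt", std::vector<Value *>{DeoptArg});
//   CallBase *NewCB = CallBase::addOperandBundle(CB, LLVMContext::OB_deopt,
//                                                OB, /*InsertPt=*/CB);
//   CB->replaceAllUsesWith(NewCB);
//   CB->eraseFromParent();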
bool CallBase::hasReadingOperandBundles() const {
  // Implementation note: this is a conservative implementation of operand
  // bundle semantics, where *any* non-assume operand bundle (other than
  // ptrauth and kcfi) forces a callsite to be at least readonly.
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

bool CallBase::hasClobberingOperandBundles() const {
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}
MemoryEffects CallBase::getMemoryEffects() const {
  MemoryEffects ME = getAttributes().getMemoryEffects();
  if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
    MemoryEffects FnME = Fn->getMemoryEffects();
    if (hasOperandBundles()) {
      // TODO: Add a method to get memory effects for operand bundles instead.
      if (hasReadingOperandBundles())
        FnME |= MemoryEffects::readOnly();
      if (hasClobberingOperandBundles())
        FnME |= MemoryEffects::writeOnly();
    }
    ME &= FnME;
  }
  return ME;
}
void CallBase::setMemoryEffects(MemoryEffects ME) {
  addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
}
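
// Illustrative sketch (assumption): clients query the combined call-site,
// callee, and bundle information through this single interface:
//
//   MemoryEffects ME = CB->getMemoryEffects();
//   if (ME.doesNotAccessMemory())
//     ; // No memory dependencies at all.
//   else if (ME.onlyReadsMemory())
//     ; // May read, but never writes.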
/// Determine if the function does not access memory.
bool CallBase::doesNotAccessMemory() const {
  return getMemoryEffects().doesNotAccessMemory();
}
void CallBase::setDoesNotAccessMemory() {
  setMemoryEffects(MemoryEffects::none());
}

/// Determine if the function does not access or only reads memory.
bool CallBase::onlyReadsMemory() const {
  return getMemoryEffects().onlyReadsMemory();
}
void CallBase::setOnlyReadsMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
}

/// Determine if the function does not access or only writes memory.
bool CallBase::onlyWritesMemory() const {
  return getMemoryEffects().onlyWritesMemory();
}
void CallBase::setOnlyWritesMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
}

/// Determine if the call can access memory only using pointers based
/// on its arguments.
bool CallBase::onlyAccessesArgMemory() const {
  return getMemoryEffects().onlyAccessesArgPointees();
}
void CallBase::setOnlyAccessesArgMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
}

/// Determine if the function may only access memory that is
/// inaccessible from the IR.
bool CallBase::onlyAccessesInaccessibleMemory() const {
  return getMemoryEffects().onlyAccessesInaccessibleMem();
}
void CallBase::setOnlyAccessesInaccessibleMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
}

/// Determine if the function may only access memory that is
/// either inaccessible from the IR or pointed to by its arguments.
bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
  return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
}
void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
  setMemoryEffects(getMemoryEffects() &
                   MemoryEffects::inaccessibleOrArgMemOnly());
}
//===----------------------------------------------------------------------===//
//                        CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}
&CI
)
727 : CallBase(CI
.Attrs
, CI
.FTy
, CI
.getType(), Instruction::Call
,
728 OperandTraits
<CallBase
>::op_end(this) - CI
.getNumOperands(),
729 CI
.getNumOperands()) {
730 setTailCallKind(CI
.getTailCallKind());
731 setCallingConv(CI
.getCallingConv());
733 std::copy(CI
.op_begin(), CI
.op_end(), op_begin());
734 std::copy(CI
.bundle_op_info_begin(), CI
.bundle_op_info_end(),
735 bundle_op_info_begin());
736 SubclassOptionalData
= CI
.SubclassOptionalData
;
CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}
// Update profile weight for a call instruction by scaling it using the ratio
// of S/T. The meaning of "branch_weights" metadata for a call instruction is
// transferred to represent call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;

  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;

  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }

  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0));
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights") &&
      ProfileData->getNumOperands() > 0) {
    // Using APInt::div may be expensive, but most cases should fit 64 bits.
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
                       ->getValue()
                       .getZExtValue());
    Val *= APS;
    Vals.push_back(MDB.createConstant(
        ConstantInt::get(Type::getInt32Ty(getContext()),
                         Val.udiv(APT).getLimitedValue(UINT32_MAX))));
  } else if (ProfDataName->getString().equals("VP"))
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      // The first value is the key of the value profile, which will not change.
      Vals.push_back(ProfileData->getOperand(i));
      uint64_t Count =
          mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
              ->getValue()
              .getZExtValue();
      // Don't scale the magic number.
      if (Count == NOMORE_ICP_MAGICNUM) {
        Vals.push_back(ProfileData->getOperand(i + 1));
        continue;
      }
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
      APInt Val(128, Count);
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}
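
// Illustrative sketch (assumption): a transformation that clones or moves a
// call site with count-based profile data would rescale it roughly like so,
// where NewCount/OldCount is the fraction of executions kept:
//
//   CI->updateProfWeight(NewCount, OldCount);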
//===----------------------------------------------------------------------===//
//                        InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}
InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}
InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}
LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}
//===----------------------------------------------------------------------===//
//                        CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  std::copy(Args.begin(), Args.end(), op_begin());
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}
CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}
CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}
//===----------------------------------------------------------------------===//
//                        ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
//===----------------------------------------------------------------------===//
//                        ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}
//===----------------------------------------------------------------------===//
//                       CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}
//===----------------------------------------------------------------------===//
//                        CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}
//===----------------------------------------------------------------------===//
//                       CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation.  This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}
void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}
//===----------------------------------------------------------------------===//
//                        FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}
//===----------------------------------------------------------------------===//
//                      UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}
//===----------------------------------------------------------------------===//
//                        BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}
void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}
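
// Illustrative sketch (assumption): swapSuccessors() does not touch the
// condition, so reversing a conditional branch takes two steps:
//
//   BI->setCondition(BinaryOperator::CreateNot(BI->getCondition(), "", BI));
//   BI->swapSuccessors(); // Keeps !prof metadata consistent, per above.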
//===----------------------------------------------------------------------===//
//                        AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeAllocaDefaultAlign(Ty, I->getParent());
}
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
                 InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}
bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size.  If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}
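
// Illustrative sketch (assumption): cost models commonly treat static allocas
// as free because they fold into the prologue, e.g.:
//
//   for (const Instruction &I : F.getEntryBlock())
//     if (auto *AI = dyn_cast<AllocaInst>(&I))
//       if (AI->isStaticAlloca())
//         ; // No runtime cost to model for this alloca.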
//===----------------------------------------------------------------------===//
//                           LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
}

static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getABITypeAlign(Ty);
}

static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeLoadStoreDefaultAlign(Ty, I->getParent());
}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}
//===----------------------------------------------------------------------===//
//                           StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
}

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
                InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}
//===----------------------------------------------------------------------===//
//                       AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
//===----------------------------------------------------------------------===//
//                        AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "atomicrmw instructions can only be atomic.");
  assert(Ordering != AtomicOrdering::Unordered &&
         "atomicrmw instructions cannot be unordered.");
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
// The extraction elided the returned strings; they are refilled here with the
// textual IR keywords for each atomicrmw operation.
StringRef AtomicRMWInst::getOperationName(BinOp Op) {
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
  case AtomicRMWInst::Nand:
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
  case AtomicRMWInst::UMax:
    return "umax";
  case AtomicRMWInst::UMin:
    return "umin";
  case AtomicRMWInst::FAdd:
    return "fadd";
  case AtomicRMWInst::FSub:
    return "fsub";
  case AtomicRMWInst::FMax:
    return "fmax";
  case AtomicRMWInst::FMin:
    return "fmin";
  case AtomicRMWInst::UIncWrap:
    return "uinc_wrap";
  case AtomicRMWInst::UDecWrap:
    return "udec_wrap";
  case AtomicRMWInst::BAD_BINOP:
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}

//===----------------------------------------------------------------------===//
//                        FenceInst Implementation
//===----------------------------------------------------------------------===//

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

//===----------------------------------------------------------------------===//
//                     GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//

void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}

GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}

Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (!Struct->indexValid(Idx))
      return nullptr;
    return Struct->getTypeAtIndex(Idx);
  }
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
    if (!Ty)
      return nullptr;
  }
  return Ty;
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
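
// Worked example (illustrative only): for %T = type { i32, [4 x float] },
// the first GEP index only steps the base pointer and leaves the type
// unchanged, so getIndexedType(%T, {0, 1}) yields [4 x float] and
// getIndexedType(%T, {0, 1, 2}) yields float. Invalid indices yield nullptr.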

/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
bool GetElementPtrInst::hasAllZeroIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
      if (!CI->isZero()) return false;
    } else {
      return false;
    }
  }
  return true;
}

/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
bool GetElementPtrInst::hasAllConstantIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (!isa<ConstantInt>(getOperand(i)))
      return false;
  }
  return true;
}

void GetElementPtrInst::setIsInBounds(bool B) {
  cast<GEPOperator>(this)->setIsInBounds(B);
}

bool GetElementPtrInst::isInBounds() const {
  return cast<GEPOperator>(this)->isInBounds();
}

bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

bool GetElementPtrInst::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
                                                ConstantOffset);
}

//===----------------------------------------------------------------------===//
//                    ExtractElementInst Implementation
//===----------------------------------------------------------------------===//

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
    : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                  ExtractElement,
                  OperandTraits<ExtractElementInst>::op_begin(this), 2,
                  InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
    : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                  ExtractElement,
                  OperandTraits<ExtractElementInst>::op_begin(this), 2,
                  InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");

  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
    return false;
  return true;
}

//===----------------------------------------------------------------------===//
//                     InsertElementInst Implementation
//===----------------------------------------------------------------------===//

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
    : Instruction(Vec->getType(), InsertElement,
                  OperandTraits<InsertElementInst>::op_begin(this), 3,
                  InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
    : Instruction(Vec->getType(), InsertElement,
                  OperandTraits<InsertElementInst>::op_begin(this), 3,
                  InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");

  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
                                        const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false; // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false; // Second operand of insertelement must be vector element type.

  if (!Index->getType()->isIntegerTy())
    return false; // Third operand of insertelement must be i32.
  return true;
}

//===----------------------------------------------------------------------===//
//                     ShuffleVectorInst Implementation
//===----------------------------------------------------------------------===//

static Value *createPlaceholderForShuffleVector(Value *V) {
  assert(V && "Cannot create placeholder of nullptr V");
  return PoisonValue::get(V->getType());
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     Instruction *InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertBefore) {}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     BasicBlock *InsertAtEnd)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertAtEnd) {}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertBefore) {}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertAtEnd) {}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}

void ShuffleVectorInst::commute() {
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(i);
    if (MaskElt == PoisonMaskElem) {
      NewMask[i] = PoisonMaskElem;
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  }
  setShuffleMask(NewMask);
  Op<0>().swap(Op<1>());
}
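
// Worked example (illustrative only): with two 4-element operands, the mask
// <0, 5, 2, 7> selects lanes {0, 2} of the first source and {1, 3} of the
// second. After commute() the operands are swapped and the mask becomes
// <4, 1, 6, 3>, selecting the same values from the swapped positions.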

bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        ArrayRef<int> Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
    return false;

  // Make sure the mask elements make sense.
  int V1Size =
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
    if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
      return false;

  if (isa<ScalableVectorType>(V1->getType()))
    if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
      return false;

  return true;
}

bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32, and must be the same kind of vector as the
  // result vectors.
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
    return false;

  // Check to see if Mask is valid.
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  return false;
}

void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();

  if (isa<ConstantAggregateZero>(Mask)) {
    Result.resize(EC.getKnownMinValue(), 0);
    return;
  }

  Result.reserve(EC.getKnownMinValue());

  if (EC.isScalable()) {
    assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
           "Scalable vector shuffle mask must be undef or zeroinitializer");
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
    for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
      Result.emplace_back(MaskVal);
    return;
  }

  unsigned NumElts = EC.getKnownMinValue();

  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}

void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
  ShuffleMask.assign(Mask.begin(), Mask.end());
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
}

Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                          Type *ResultTy) {
  Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
  if (isa<ScalableVectorType>(ResultTy)) {
    assert(all_equal(Mask) && "Unexpected shuffle");
    Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
    if (Mask[0] == 0)
      return Constant::getNullValue(VecTy);
    return UndefValue::get(VecTy);
  }
  SmallVector<Constant *, 16> MaskConst;
  for (int Elem : Mask) {
    if (Elem == PoisonMaskElem)
      MaskConst.push_back(PoisonValue::get(Int32Ty));
    else
      MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
  }
  return ConstantVector::get(MaskConst);
}

static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  assert(!Mask.empty() && "Shuffle mask must contain elements");
  bool UsesLHS = false;
  bool UsesRHS = false;
  for (int I : Mask) {
    if (I == -1)
      continue;
    assert(I >= 0 && I < (NumOpElts * 2) &&
           "Out-of-bounds shuffle mask element");
    UsesLHS |= (I < NumOpElts);
    UsesRHS |= (I >= NumOpElts);
    if (UsesLHS && UsesRHS)
      return false;
  }
  // Allow for degenerate case: completely undef mask means neither source is
  // used.
  return UsesLHS || UsesRHS;
}

bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, NumSrcElts);
}
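
// Examples (illustrative only), for NumSrcElts == 4:
//   <0, 3, 1, 2>  -> true  (only the first source is referenced)
//   <4, 7, 5, 6>  -> true  (only the second source is referenced)
//   <0, 5, 2, 7>  -> false (lanes are drawn from both sources)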

static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  if (!isSingleSourceMaskImpl(Mask, NumOpElts))
    return false;
  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != i && Mask[i] != (NumOpElts + i))
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, NumSrcElts);
}

bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;

  // The number of elements in the mask must be at least 2.
  if (NumSrcElts < 2)
    return false;

  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != (NumSrcElts - 1 - I) &&
        Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
      return false;
  }
  return true;
}
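
// Example (illustrative only): for NumSrcElts == 4, <3, 2, 1, 0> reverses
// the first source and <7, 6, 5, 4> reverses the second; undef (-1) lanes
// are ignored.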

bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != 0 && Mask[I] != NumSrcElts)
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Select is differentiated from identity. It requires using both sources.
  if (isSingleSourceMask(Mask, NumSrcElts))
    return false;
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
      return false;
  }
  return true;
}
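
// Example (illustrative only): for NumSrcElts == 4, <0, 5, 2, 7> is a select
// mask: each lane I picks either Mask[I] == I (first source) or
// Mask[I] == 4 + I (second source), and both sources are used.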

bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask, int NumSrcElts) {
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;

  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int Sz = Mask.size();
  if (Sz < 2 || !isPowerOf2_32(Sz))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumSrcElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  for (int I = 2; I < Sz; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[I - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  int StartIndex = -1;
  for (int I = 0, E = Mask.size(); I != E; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      continue;

    if (StartIndex == -1) {
      // Don't support a StartIndex that begins in the second input, or if the
      // first non-undef index would access below the StartIndex.
      if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
        return false;

      StartIndex = MaskEltVal - I;
      continue;
    }

    // Splice is sequential starting from StartIndex.
    if (MaskEltVal != (StartIndex + I))
      return false;
  }

  if (StartIndex == -1)
    return false;

  // NOTE: This accepts StartIndex == 0 (COPY).
  Index = StartIndex;
  return true;
}
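
// Example (illustrative only): for two 4-element sources, <1, 2, 3, 4>
// matches with StartIndex == 1: the result is the concatenation A[1..3]
// followed by B[0], i.e. a splice of A and B starting at index 1.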

bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
                                               int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}
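
// Example (illustrative only): with NumSrcElts == 8 and the 4-element mask
// <2, 3, 4, 5>, every defined element satisfies (M % 8) - i == 2, so this is
// recognized as extracting a 4-element subvector starting at Index == 2.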

bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
                                              int NumSrcElts, int &NumSubElts,
                                              int &Index) {
  int NumMaskElts = Mask.size();

  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;

  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true;
  bool Src1Identity = true;

  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements is inplace within its own
  // span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements is inplace within its own
  // span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}

bool ShuffleVectorInst::isIdentityWithPadding() const {
  if (isa<UndefValue>(Op<2>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  ArrayRef<int> Mask = getShuffleMask();
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}

bool ShuffleVectorInst::isIdentityWithExtract() const {
  if (isa<UndefValue>(Op<2>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}

bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()) ||
      isa<UndefValue>(Op<2>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}

static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
                                        int ReplicationFactor, int VF) {
  assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
         "Unexpected mask size.");

  for (int CurrElt : seq(VF)) {
    ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
    assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
           "Run out of mask?");
    Mask = Mask.drop_front(ReplicationFactor);
    if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
          return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
        }))
      return false;
  }
  assert(Mask.empty() && "Did not consume the whole mask?");

  return true;
}

bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
                                          int &ReplicationFactor, int &VF) {
  // undef-less case is trivial.
  if (!llvm::is_contained(Mask, PoisonMaskElem)) {
    ReplicationFactor =
        Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
    if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
      return false;
    VF = Mask.size() / ReplicationFactor;
    return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
  }

  // However, if the mask contains undef's, we have to enumerate possible tuples
  // and pick one. There are bounds on replication factor: [1, mask size]
  // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
  // Additionally, mask size is a replication factor multiplied by vector size,
  // which further significantly reduces the search space.

  // Before doing that, let's perform basic correctness checking first.
  int Largest = -1;
  for (int MaskElt : Mask) {
    if (MaskElt == PoisonMaskElem)
      continue;
    // Elements must be in non-decreasing order.
    if (MaskElt < Largest)
      return false;
    Largest = std::max(Largest, MaskElt);
  }

  // Prefer larger replication factor if all else equal.
  for (int PossibleReplicationFactor :
       reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
    if (Mask.size() % PossibleReplicationFactor != 0)
      continue;
    int PossibleVF = Mask.size() / PossibleReplicationFactor;
    if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
                                     PossibleVF))
      continue;
    ReplicationFactor = PossibleReplicationFactor;
    VF = PossibleVF;
    return true;
  }

  return false;
}

bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
                                          int &VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  if (ShuffleMask.size() % VF != 0)
    return false;
  ReplicationFactor = ShuffleMask.size() / VF;

  return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
}
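
// Example (illustrative only): <0, 0, 0, 1, 1, 1> is a replication mask with
// ReplicationFactor == 3 and VF == 2: each source lane is repeated three
// times in order. Poison elements may stand in for any expected value.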

bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
  if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
      Mask.size() % VF != 0)
    return false;
  for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
    ArrayRef<int> SubMask = Mask.slice(K, VF);
    if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
      continue;
    SmallBitVector Used(VF, false);
    for (int Idx : SubMask) {
      if (Idx != PoisonMaskElem && Idx < VF)
        Used.set(Idx);
    }
    if (!Used.all())
      return false;
  }
  return true;
}

/// Return true if this shuffle reads from a single source vector and each
/// non-poison VF-element chunk of its mask uses every source lane exactly
/// once.
bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;
  if (!isSingleSourceMask(ShuffleMask, VF))
    return false;

  return isOneUseSingleSourceMask(ShuffleMask, VF);
}

bool ShuffleVectorInst::isInterleave(unsigned Factor) {
  FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
  // shuffle_vector can only interleave fixed length vectors - for scalable
  // vectors, see the @llvm.experimental.vector.interleave2 intrinsic
  if (!OpTy)
    return false;
  unsigned OpNumElts = OpTy->getNumElements();

  return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
}

bool ShuffleVectorInst::isInterleaveMask(
    ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
    SmallVectorImpl<unsigned> &StartIndexes) {
  unsigned NumElts = Mask.size();
  if (NumElts % Factor)
    return false;

  unsigned LaneLen = NumElts / Factor;
  if (!isPowerOf2_32(LaneLen))
    return false;

  StartIndexes.resize(Factor);

  // Check whether each element matches the general interleaved rule.
  // Ignore undef elements, as long as the defined elements match the rule.
  // Outer loop processes all factors (x, y, z in the above example)
  unsigned I = 0, J;
  for (; I < Factor; I++) {
    unsigned SavedLaneValue;
    unsigned SavedNoUndefs = 0;

    // Inner loop processes consecutive accesses (x, x+1... in the example)
    for (J = 0; J < LaneLen - 1; J++) {
      // Lane computes x's position in the Mask
      unsigned Lane = J * Factor + I;
      unsigned NextLane = Lane + Factor;
      int LaneValue = Mask[Lane];
      int NextLaneValue = Mask[NextLane];

      // If both are defined, values must be sequential
      if (LaneValue >= 0 && NextLaneValue >= 0 &&
          LaneValue + 1 != NextLaneValue)
        break;

      // If the next value is undef, save the current one as reference
      if (LaneValue >= 0 && NextLaneValue < 0) {
        SavedLaneValue = LaneValue;
        SavedNoUndefs = 1;
      }

      // Undefs are allowed, but defined elements must still be consecutive:
      // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
      // Verify this by storing the last non-undef followed by an undef
      // Check that following non-undef masks are incremented with the
      // corresponding distance.
      if (SavedNoUndefs > 0 && LaneValue < 0) {
        SavedNoUndefs++;
        if (NextLaneValue >= 0 &&
            SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
          break;
      }
    }

    if (J < LaneLen - 1)
      return false;

    int StartMask = 0;
    if (Mask[I] >= 0) {
      // Check that the start of the I range (J=0) is greater than 0
      StartMask = Mask[I];
    } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
      // StartMask defined by the last value in lane
      StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
    } else if (SavedNoUndefs > 0) {
      // StartMask defined by some non-zero value in the j loop
      StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
    }
    // else StartMask remains set to 0, i.e. all elements are undefs

    if (StartMask < 0)
      return false;
    // We must stay within the vectors; This case can happen with undefs.
    if (StartMask + LaneLen > NumInputElts)
      return false;

    StartIndexes[I] = StartMask;
  }

  return true;
}
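
// Example (illustrative only): for Factor == 2 and two 4-element inputs
// (NumInputElts == 8), the mask <0, 4, 1, 5, 2, 6, 3, 7> interleaves the
// two sources lane by lane; StartIndexes is filled with {0, 4}.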

/// Try to lower a vector shuffle as a bit rotation.
///
/// Look for a repeated rotation pattern in each sub group.
/// Returns an element-wise left bit rotation amount or -1 if failed.
static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
  int NumElts = Mask.size();
  assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");

  int RotateAmt = -1;
  for (int i = 0; i != NumElts; i += NumSubElts) {
    for (int j = 0; j != NumSubElts; ++j) {
      int M = Mask[i + j];
      if (M < 0)
        continue;
      if (M < i || M >= i + NumSubElts)
        return -1;
      int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
      if (0 <= RotateAmt && Offset != RotateAmt)
        return -1;
      RotateAmt = Offset;
    }
  }
  return RotateAmt;
}
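
// Example (illustrative only): splitting an 8-element mask into two 4-element
// groups, <3, 0, 1, 2, 7, 4, 5, 6> yields a repeated per-group rotate amount
// of 1 for every defined element, so matchShuffleAsBitRotate returns 1
// (isBitRotateMask below scales that by the element size in bits).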

bool ShuffleVectorInst::isBitRotateMask(
    ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
    unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
  for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
    int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
    if (EltRotateAmt < 0)
      continue;
    RotateAmt = EltRotateAmt * EltSizeInBits;
    return true;
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                         InsertValueInst Class
//===----------------------------------------------------------------------===//

void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue,
                  OperandTraits<InsertValueInst>::op_begin(this), 2),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}

//===----------------------------------------------------------------------===//
//                         ExtractValueInst Class
//===----------------------------------------------------------------------===//

void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
      Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}

// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// aggregate type.
//
Type *ExtractValueInst::getIndexedType(Type *Agg,
                                       ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
      Agg = AT->getElementType();
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
      Agg = ST->getElementType(Index);
    } else {
      // Not a valid type to index into.
      return nullptr;
    }
  }
  return const_cast<Type *>(Agg);
}

//===----------------------------------------------------------------------===//
//                          UnaryOperator Class
//===----------------------------------------------------------------------===//

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
    : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
    : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     Instruction *InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}

UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     BasicBlock *InsertAtEnd) {
  UnaryOperator *Res = Create(Op, S, Name);
  Res->insertInto(InsertAtEnd, InsertAtEnd->end());
  return Res;
}

void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

//===----------------------------------------------------------------------===//
//                         BinaryOperator Class
//===----------------------------------------------------------------------===//

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
    : Instruction(Ty, iType,
                  OperandTraits<BinaryOperator>::op_begin(this),
                  OperandTraits<BinaryOperator>::operands(this),
                  InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
    : Instruction(Ty, iType,
                  OperandTraits<BinaryOperator>::op_begin(this),
                  OperandTraits<BinaryOperator>::operands(this),
                  InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  Res->insertInto(InsertAtEnd, InsertAtEnd->end());
  return Res;
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub,
                            Zero, Op,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub,
                            Zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
                            Op->getType(), Name, InsertAtEnd);
}

// Exchange the two operands to this instruction. This instruction is safe to
// use on any binary instruction and does not modify the semantics of the
// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
// must be changed.
bool BinaryOperator::swapOperands() {
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  return false;
}

//===----------------------------------------------------------------------===//
//                          FPMathOperator Class
//===----------------------------------------------------------------------===//

float FPMathOperator::getFPAccuracy() const {
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0f;
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}

//===----------------------------------------------------------------------===//
//                            CastInst Class
//===----------------------------------------------------------------------===//

// Just determine if this cast only deals with integral->integral conversion.
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  case Instruction::BitCast:
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32     ; on 32-bit platforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target information may give a more accurate answer here.
    return false;
  case Instruction::BitCast:
    return true; // BitCast never modifies bits.
  case Instruction::PtrToInt:
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
  }
}

bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}
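
// Example (illustrative only): on a target whose DataLayout reports 64-bit
// pointers, "ptrtoint ptr %p to i64" is a no-op (the bits are unchanged),
// while "ptrtoint ptr %p to i32" is not, since the value is truncated.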

/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// *  %F = firstOpcode SrcTy %x to MidTy
/// *  %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// *  %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
    Instruction::CastOps firstOp, Instruction::CastOps secondOp,
    Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy,
    Type *MidIntPtrTy, Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below. The rows correspond to firstOp, the columns
  // correspond to secondOp. In looking at the table below, keep in mind
  // the following cast properties:
  //
  //           Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a        FirstClass   n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc. We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
      Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
      // T        F  F  U  S  F  F  P  I  B  A  -+
      // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
      // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
      // N  X  X  U  S  F  F  N  X  N  2  V  V   |
      // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
      {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
      {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
      {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
      {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
      {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
      { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
      { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
      { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
      { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
      {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
      { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
      {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
      {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is floating point.
      if (DstTy->isFloatingPointTy())
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 6:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is a floating point.
      if (SrcTy->isFloatingPointTy())
        return secondOp;
      return 0;
    case 7: {
      // Disable inttoptr/ptrtoint optimization if enabled.
      if (DisableI2pP2iOpt)
        return 0;

      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize == DstSize)
        return Instruction::BitCast;
      if (SrcSize < DstSize)
        return firstOp;
      if (SrcSize > DstSize)
        return secondOp;
      return 0;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
          SrcTy->isPtrOrPtrVectorTy() &&
          MidTy->isPtrOrPtrVectorTy() &&
          DstTy->isPtrOrPtrVectorTy() &&
          SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
          MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
          "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast
      return Instruction::AddrSpaceCast;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
          SrcTy->isIntOrIntVectorTy() &&
          MidTy->isPtrOrPtrVectorTy() &&
          DstTy->isPtrOrPtrVectorTy() &&
          MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
          "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
          SrcTy->isPtrOrPtrVectorTy() &&
          MidTy->isPtrOrPtrVectorTy() &&
          DstTy->isIntOrIntVectorTy() &&
          SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
          "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}
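
// Worked example (illustrative only): firstOp == ZExt (i8 -> i32) followed
// by secondOp == Trunc (i32 -> i16) selects ElimCase 8, and since
// sizeof(SrcTy) < sizeof(DstTy) the pair folds to a single "zext i8 to i16".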

CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
                           const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
                           const Twine &Name, BasicBlock *InsertAtEnd) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertAtEnd);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertAtEnd);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertAtEnd);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertAtEnd);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertAtEnd);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertAtEnd);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertAtEnd);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertAtEnd);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
  default: llvm_unreachable("Invalid opcode provided");
  }
}
CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
}

CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
}

CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
                                         const Twine &Name,
                                         Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
                                         const Twine &Name,
                                         BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
}
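// What these helpers buy you (illustrative only; `V16`, `I32` and `InsertPt`
// are assumed caller values): for a same-width pair such as i32 -> i32 they
// fold to a bitcast instead of an invalid extension or truncation.
//
//   // i16 -> i32: emits zext; i32 -> i32 would emit bitcast instead.
//   CastInst *C = CastInst::CreateZExtOrBitCast(V16, I32, "w", InsertPt);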
/// Create a BitCast or a PtrToInt cast instruction
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
}

/// Create a BitCast or a PtrToInt cast instruction
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      const Twine &Name,
                                      Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
}
CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
    Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);

  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
}

CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
    Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}
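// Selection sketch (illustrative only; `PtrAS0`, `PtrAS1Ty` and `BB` are
// assumed caller values): a source pointer in addrspace(0) cast to an
// addrspace(1) pointer type yields an addrspacecast, while a cast within the
// same address space degenerates to a no-op bitcast.
//
//   CastInst::CreatePointerBitCastOrAddrSpaceCast(PtrAS0, PtrAS1Ty, "asc", BB);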
CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
                                           const Twine &Name,
                                           Instruction *InsertBefore) {
  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
  if (S->getType()->isIntegerTy() && Ty->isPointerTy())
    return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}
CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
                                      bool isSigned, const Twine &Name,
                                      Instruction *InsertBefore) {
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
         "Invalid integer cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::Trunc :
      (isSigned ? Instruction::SExt : Instruction::ZExt)));
  return Create(opcode, C, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
                                      bool isSigned, const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
         "Invalid integer cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::Trunc :
      (isSigned ? Instruction::SExt : Instruction::ZExt)));
  return Create(opcode, C, Ty, Name, InsertAtEnd);
}
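// The opcode choice above, in one table (illustrative only, scalar integers;
// `V64`, `I32Ty` and `InsertPt` are assumed caller values):
//
//   i64 -> i32 : Trunc
//   i32 -> i64 : SExt when isSigned, ZExt otherwise
//   i32 -> i32 : BitCast (no-op)
//
//   CastInst::CreateIntegerCast(V64, I32Ty, /*isSigned=*/false, "t", InsertPt);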
CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
                                 const Twine &Name,
                                 Instruction *InsertBefore) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
                                 const Twine &Name,
                                 BasicBlock *InsertAtEnd) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertAtEnd);
}
bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  if (SrcTy == DestTy)
    return true;

  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }
    }
  }

  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
    if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
      return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
    }
  }

  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Could still have vectors of pointers if the number of elements doesn't
  // match
  if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
    return false;

  if (SrcBits != DestBits)
    return false;

  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
    return false;

  return true;
}
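// Two quick data points (illustrative only; `Ctx` is an assumed LLVMContext):
//
//   Type *V4I32 = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
//   Type *V2I64 = FixedVectorType::get(Type::getInt64Ty(Ctx), 2);
//   CastInst::isBitCastable(V4I32, V2I64);  // true: both 128 bits, no pointers
//
// whereas pointers in different address spaces are rejected here and must go
// through AddrSpaceCast instead.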
bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
                                          const DataLayout &DL) {
  // ptrtoint and inttoptr are not allowed on non-integral pointers
  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
    if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));
  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
    if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));

  return isBitCastable(SrcTy, DestTy);
}
// Provide a way to get a "cast" where the cast opcode is inferred from the
// types and size of the operand. This, basically, is a parallel of the
// logic in the castIsValid function below. This axiom should hold:
//   castIsValid( getCastOpcode(Val, Ty), Val, Ty)
// should not assert in castIsValid. In other words, this produces a "correct"
// casting opcode for the arguments passed to it.
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Find the appropriate opcode based on the
        // element types.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {                      // Casting to integral
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                               // int -> smaller int
      else if (DestBits > SrcBits) {                // it's an extension
        if (SrcIsSigned)
          return SExt;                              // signed -> SEXT
        else
          return ZExt;                              // unsigned -> ZEXT
      } else {
        return BitCast;                             // Same size, No-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                              // FP -> sint
      else
        return FPToUI;                              // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                               // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                              // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                              // sint -> FP
      else
        return UIToFP;                              // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                             // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                               // FP -> larger FP
      } else {
        return BitCast;                             // same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                               // same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                               // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                              // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  } else if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
      return BitCast;                               // 64-bit vector to MMX
    }
    llvm_unreachable("Illegal cast to X86_MMX");
  }
  llvm_unreachable("Casting to type that is not first-class");
}
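// Worked example of the axiom above (illustrative only; `V16` is an assumed
// i16 Value and `I32` an i32 Type in the caller):
//
//   Instruction::CastOps Op =
//       CastInst::getCastOpcode(V16, /*SrcIsSigned=*/true,
//                               I32, /*DestIsSigned=*/true);
//   // Op == Instruction::SExt, and castIsValid(Op, V16, I32) holds.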
//===----------------------------------------------------------------------===//
//                          CastInst SubClass Constructors
//===----------------------------------------------------------------------===//

/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
      SrcTy->isAggregateType() || DstTy->isAggregateType())
    return false;

  // Get the size of the types in bits, and whether we are dealing
  // with vector types, we'll need this later.
  bool SrcIsVec = isa<VectorType>(SrcTy);
  bool DstIsVec = isa<VectorType>(DstTy);
  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();

  // If these are vector types, get the lengths of the vectors (using zero for
  // scalar types means that checking that vector lengths match also checks that
  // scalars are not being converted to vectors or vectors to scalars).
  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
                                : ElementCount::getFixed(0);
  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
                                : ElementCount::getFixed(0);

  // Switch on the opcode provided
  switch (op) {
  default: return false; // This is an input error
  case Instruction::Trunc:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::ZExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::SExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::FPTrunc:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::FPExt:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC;
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC;
  case Instruction::PtrToInt:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  case Instruction::IntToPtr:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  case Instruction::BitCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());

    // BitCast implies a no-op cast of type only. No bits change.
    // However, you can't cast pointers to anything but pointers.
    if (!SrcPtrTy != !DstPtrTy)
      return false;

    // For non-pointer cases, the cast is okay if the source and destination bit
    // widths are identical.
    if (!SrcPtrTy)
      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();

    // If both are pointers then the address spaces must match.
    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
      return false;

    // A vector of pointers must have the same number of elements.
    if (SrcIsVec && DstIsVec)
      return SrcEC == DstEC;
    if (SrcIsVec)
      return SrcEC == ElementCount::getFixed(1);
    if (DstIsVec)
      return DstEC == ElementCount::getFixed(1);

    return true;
  }
  case Instruction::AddrSpaceCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    if (!SrcPtrTy)
      return false;
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
    if (!DstPtrTy)
      return false;

    if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
      return false;

    return SrcEC == DstEC;
  }
  }
}
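// For instance (illustrative only; `I64Val`, `I32Val` and `I32Ty` are assumed
// caller values), a trunc requires strictly narrower integer scalars with
// matching vector widths:
//
//   CastInst::castIsValid(Instruction::Trunc, I64Val, I32Ty); // true
//   CastInst::castIsValid(Instruction::Trunc, I32Val, I32Ty); // false: same width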
TruncInst::TruncInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}

TruncInst::TruncInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}

ZExtInst::ZExtInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}

ZExtInst::ZExtInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}

SExtInst::SExtInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, SExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}

SExtInst::SExtInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}

FPTruncInst::FPTruncInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}

FPTruncInst::FPTruncInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}

FPExtInst::FPExtInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}

FPExtInst::FPExtInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}

UIToFPInst::UIToFPInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}

UIToFPInst::UIToFPInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}

SIToFPInst::SIToFPInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}

SIToFPInst::SIToFPInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}

FPToUIInst::FPToUIInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}

FPToUIInst::FPToUIInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}

FPToSIInst::FPToSIInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}

FPToSIInst::FPToSIInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}

PtrToIntInst::PtrToIntInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}

PtrToIntInst::PtrToIntInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}

IntToPtrInst::IntToPtrInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}

IntToPtrInst::IntToPtrInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}

BitCastInst::BitCastInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}

BitCastInst::BitCastInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}

AddrSpaceCastInst::AddrSpaceCastInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
}

AddrSpaceCastInst::AddrSpaceCastInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
}
//===----------------------------------------------------------------------===//
//                               CmpInst Classes
//===----------------------------------------------------------------------===//

CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, Instruction *InsertBefore,
                 Instruction *FlagsSource)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertBefore) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
  if (FlagsSource)
    copyIRFlags(FlagsSource);
}

CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertAtEnd) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
}
CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                const Twine &Name, Instruction *InsertBefore) {
  if (Op == Instruction::ICmp) {
    if (InsertBefore)
      return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
                          S1, S2, Name);
    else
      return new ICmpInst(CmpInst::Predicate(predicate),
                          S1, S2, Name);
  }

  if (InsertBefore)
    return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  else
    return new FCmpInst(CmpInst::Predicate(predicate),
                        S1, S2, Name);
}

CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                const Twine &Name, BasicBlock *InsertAtEnd) {
  if (Op == Instruction::ICmp) {
    return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  }
  return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
                      S1, S2, Name);
}
void CmpInst::swapOperands() {
  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
    IC->swapOperands();
  else
    cast<FCmpInst>(this)->swapOperands();
}

bool CmpInst::isCommutative() const {
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isCommutative();
  return cast<FCmpInst>(this)->isCommutative();
}

bool CmpInst::isEquality(Predicate P) {
  if (ICmpInst::isIntPredicate(P))
    return ICmpInst::isEquality(P);
  if (FCmpInst::isFPPredicate(P))
    return FCmpInst::isEquality(P);
  llvm_unreachable("Unsupported predicate kind");
}
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown cmp predicate!");
  case ICMP_EQ:  return ICMP_NE;
  case ICMP_NE:  return ICMP_EQ;
  case ICMP_UGT: return ICMP_ULE;
  case ICMP_ULT: return ICMP_UGE;
  case ICMP_UGE: return ICMP_ULT;
  case ICMP_ULE: return ICMP_UGT;
  case ICMP_SGT: return ICMP_SLE;
  case ICMP_SLT: return ICMP_SGE;
  case ICMP_SGE: return ICMP_SLT;
  case ICMP_SLE: return ICMP_SGT;

  case FCMP_OEQ: return FCMP_UNE;
  case FCMP_ONE: return FCMP_UEQ;
  case FCMP_OGT: return FCMP_ULE;
  case FCMP_OLT: return FCMP_UGE;
  case FCMP_OGE: return FCMP_ULT;
  case FCMP_OLE: return FCMP_UGT;
  case FCMP_UEQ: return FCMP_ONE;
  case FCMP_UNE: return FCMP_OEQ;
  case FCMP_UGT: return FCMP_OLE;
  case FCMP_ULT: return FCMP_OGE;
  case FCMP_UGE: return FCMP_OLT;
  case FCMP_ULE: return FCMP_OGT;
  case FCMP_ORD: return FCMP_UNO;
  case FCMP_UNO: return FCMP_ORD;
  case FCMP_TRUE: return FCMP_FALSE;
  case FCMP_FALSE: return FCMP_TRUE;
  }
}
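// Inversion maps each predicate to its logical complement (illustrative
// only): !(a <s b) is (a >=s b), and for floats the ordered/unordered split
// is why FCMP_OLT inverts to FCMP_UGE rather than FCMP_OGE, so NaN inputs
// land on the "true" side of the inverse.
//
//   CmpInst::getInversePredicate(CmpInst::ICMP_SLT); // ICMP_SGE
//   CmpInst::getInversePredicate(CmpInst::FCMP_OLT); // FCMP_UGE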
StringRef CmpInst::getPredicateName(Predicate Pred) {
  switch (Pred) {
  default: return "unknown";
  case FCmpInst::FCMP_FALSE: return "false";
  case FCmpInst::FCMP_OEQ: return "oeq";
  case FCmpInst::FCMP_OGT: return "ogt";
  case FCmpInst::FCMP_OGE: return "oge";
  case FCmpInst::FCMP_OLT: return "olt";
  case FCmpInst::FCMP_OLE: return "ole";
  case FCmpInst::FCMP_ONE: return "one";
  case FCmpInst::FCMP_ORD: return "ord";
  case FCmpInst::FCMP_UNO: return "uno";
  case FCmpInst::FCMP_UEQ: return "ueq";
  case FCmpInst::FCMP_UGT: return "ugt";
  case FCmpInst::FCMP_UGE: return "uge";
  case FCmpInst::FCMP_ULT: return "ult";
  case FCmpInst::FCMP_ULE: return "ule";
  case FCmpInst::FCMP_UNE: return "une";
  case FCmpInst::FCMP_TRUE: return "true";
  case ICmpInst::ICMP_EQ:  return "eq";
  case ICmpInst::ICMP_NE:  return "ne";
  case ICmpInst::ICMP_SGT: return "sgt";
  case ICmpInst::ICMP_SGE: return "sge";
  case ICmpInst::ICMP_SLT: return "slt";
  case ICmpInst::ICMP_SLE: return "sle";
  case ICmpInst::ICMP_UGT: return "ugt";
  case ICmpInst::ICMP_UGE: return "uge";
  case ICmpInst::ICMP_ULT: return "ult";
  case ICmpInst::ICMP_ULE: return "ule";
  }
}

raw_ostream &llvm::operator<<(raw_ostream &OS, CmpInst::Predicate Pred) {
  OS << CmpInst::getPredicateName(Pred);
  return OS;
}
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown icmp predicate!");
  case ICMP_EQ: case ICMP_NE:
  case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
    return pred;
  case ICMP_UGT: return ICMP_SGT;
  case ICMP_ULT: return ICMP_SLT;
  case ICMP_UGE: return ICMP_SGE;
  case ICMP_ULE: return ICMP_SLE;
  }
}

ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown icmp predicate!");
  case ICMP_EQ: case ICMP_NE:
  case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
    return pred;
  case ICMP_SGT: return ICMP_UGT;
  case ICMP_SLT: return ICMP_ULT;
  case ICMP_SGE: return ICMP_UGE;
  case ICMP_SLE: return ICMP_ULE;
  }
}
CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown cmp predicate!");
  case ICMP_EQ: case ICMP_NE:
    return pred;
  case ICMP_SGT: return ICMP_SLT;
  case ICMP_SLT: return ICMP_SGT;
  case ICMP_SGE: return ICMP_SLE;
  case ICMP_SLE: return ICMP_SGE;
  case ICMP_UGT: return ICMP_ULT;
  case ICMP_ULT: return ICMP_UGT;
  case ICMP_UGE: return ICMP_ULE;
  case ICMP_ULE: return ICMP_UGE;

  case FCMP_FALSE: case FCMP_TRUE:
  case FCMP_OEQ: case FCMP_ONE:
  case FCMP_UEQ: case FCMP_UNE:
  case FCMP_ORD: case FCMP_UNO:
    return pred;
  case FCMP_OGT: return FCMP_OLT;
  case FCMP_OLT: return FCMP_OGT;
  case FCMP_OGE: return FCMP_OLE;
  case FCMP_OLE: return FCMP_OGE;
  case FCMP_UGT: return FCMP_ULT;
  case FCMP_ULT: return FCMP_UGT;
  case FCMP_UGE: return FCMP_ULE;
  case FCMP_ULE: return FCMP_UGE;
  }
}
bool CmpInst::isNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGE:
  case ICMP_SLE:
  case ICMP_UGE:
  case ICMP_ULE:
  case FCMP_OGE:
  case FCMP_OLE:
  case FCMP_UGE:
  case FCMP_ULE:
    return true;
  default:
    return false;
  }
}

bool CmpInst::isStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT:
  case ICMP_SLT:
  case ICMP_UGT:
  case ICMP_ULT:
  case FCMP_OGT:
  case FCMP_OLT:
  case FCMP_UGT:
  case FCMP_ULT:
    return true;
  default:
    return false;
  }
}

CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGE: return ICMP_SGT;
  case ICMP_SLE: return ICMP_SLT;
  case ICMP_UGE: return ICMP_UGT;
  case ICMP_ULE: return ICMP_ULT;
  case FCMP_OGE: return FCMP_OGT;
  case FCMP_OLE: return FCMP_OLT;
  case FCMP_UGE: return FCMP_UGT;
  case FCMP_ULE: return FCMP_ULT;
  default: return pred;
  }
}

CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT: return ICMP_SGE;
  case ICMP_SLT: return ICMP_SLE;
  case ICMP_UGT: return ICMP_UGE;
  case ICMP_ULT: return ICMP_ULE;
  case FCMP_OGT: return FCMP_OGE;
  case FCMP_OLT: return FCMP_OLE;
  case FCMP_UGT: return FCMP_UGE;
  case FCMP_ULT: return FCMP_ULE;
  default: return pred;
  }
}

CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
  assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");

  if (isStrictPredicate(pred))
    return getNonStrictPredicate(pred);
  if (isNonStrictPredicate(pred))
    return getStrictPredicate(pred);

  llvm_unreachable("Unknown predicate!");
}
CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_ULT:
    return CmpInst::ICMP_SLT;
  case CmpInst::ICMP_ULE:
    return CmpInst::ICMP_SLE;
  case CmpInst::ICMP_UGT:
    return CmpInst::ICMP_SGT;
  case CmpInst::ICMP_UGE:
    return CmpInst::ICMP_SGE;
  }
}

CmpInst::Predicate CmpInst::getUnsignedPredicate(Predicate pred) {
  assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_SLT:
    return CmpInst::ICMP_ULT;
  case CmpInst::ICMP_SLE:
    return CmpInst::ICMP_ULE;
  case CmpInst::ICMP_SGT:
    return CmpInst::ICMP_UGT;
  case CmpInst::ICMP_SGE:
    return CmpInst::ICMP_UGE;
  }
}
bool CmpInst::isUnsigned(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE: return true;
  }
}

bool CmpInst::isSigned(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE: return true;
  }
}
bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
                       ICmpInst::Predicate Pred) {
  assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
  switch (Pred) {
  case ICmpInst::Predicate::ICMP_EQ:
    return LHS.eq(RHS);
  case ICmpInst::Predicate::ICMP_NE:
    return LHS.ne(RHS);
  case ICmpInst::Predicate::ICMP_UGT:
    return LHS.ugt(RHS);
  case ICmpInst::Predicate::ICMP_UGE:
    return LHS.uge(RHS);
  case ICmpInst::Predicate::ICMP_ULT:
    return LHS.ult(RHS);
  case ICmpInst::Predicate::ICMP_ULE:
    return LHS.ule(RHS);
  case ICmpInst::Predicate::ICMP_SGT:
    return LHS.sgt(RHS);
  case ICmpInst::Predicate::ICMP_SGE:
    return LHS.sge(RHS);
  case ICmpInst::Predicate::ICMP_SLT:
    return LHS.slt(RHS);
  case ICmpInst::Predicate::ICMP_SLE:
    return LHS.sle(RHS);
  default:
    llvm_unreachable("Unexpected non-integer predicate.");
  }
}
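// Quick check of the dispatch above (illustrative only): the same bit
// pattern compares differently under signed and unsigned predicates.
//
//   APInt A(8, 0x80), B(8, 1);                    // A is 128u but -128 signed
//   ICmpInst::compare(A, B, ICmpInst::ICMP_UGT);  // true:  128 >u 1
//   ICmpInst::compare(A, B, ICmpInst::ICMP_SGT);  // false: -128 <s 1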
bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
                       FCmpInst::Predicate Pred) {
  APFloat::cmpResult R = LHS.compare(RHS);
  switch (Pred) {
  default:
    llvm_unreachable("Invalid FCmp Predicate");
  case FCmpInst::FCMP_FALSE:
    return false;
  case FCmpInst::FCMP_TRUE:
    return true;
  case FCmpInst::FCMP_UNO:
    return R == APFloat::cmpUnordered;
  case FCmpInst::FCMP_ORD:
    return R != APFloat::cmpUnordered;
  case FCmpInst::FCMP_UEQ:
    return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_OEQ:
    return R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UNE:
    return R != APFloat::cmpEqual;
  case FCmpInst::FCMP_ONE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_OLT:
    return R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_UGT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OGT:
    return R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULE:
    return R != APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OLE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UGE:
    return R != APFloat::cmpLessThan;
  case FCmpInst::FCMP_OGE:
    return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
  }
}
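// NaN behaviour in one line (illustrative only): when R == cmpUnordered,
// every FCMP_U* predicate answers true and every FCMP_O* predicate false.
//
//   APFloat NaN = APFloat::getNaN(APFloat::IEEEsingle());
//   APFloat One(1.0f);
//   FCmpInst::compare(NaN, One, FCmpInst::FCMP_UEQ); // true
//   FCmpInst::compare(NaN, One, FCmpInst::FCMP_OEQ); // false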
CmpInst::Predicate CmpInst::getFlippedSignednessPredicate(Predicate pred) {
  assert(CmpInst::isRelational(pred) &&
         "Call only with non-equality predicates!");

  if (isSigned(pred))
    return getUnsignedPredicate(pred);
  if (isUnsigned(pred))
    return getSignedPredicate(pred);

  llvm_unreachable("Unknown predicate!");
}
bool CmpInst::isOrdered(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
  case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
  case FCmpInst::FCMP_ORD: return true;
  }
}

bool CmpInst::isUnordered(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
  case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
  case FCmpInst::FCMP_UNO: return true;
  }
}

bool CmpInst::isTrueWhenEqual(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICMP_EQ:   case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
  case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
  }
}

bool CmpInst::isFalseWhenEqual(Predicate predicate) {
  switch (predicate) {
  case ICMP_NE:    case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
  default: return false;
  }
}
bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  // If the predicates match, then we know the first condition implies the
  // second is true.
  if (Pred1 == Pred2)
    return true;

  switch (Pred1) {
  default:
    break;
  case ICMP_EQ:
    // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
    return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
           Pred2 == ICMP_SLE;
  case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
  case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
  case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
  case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
  }
  return false;
}

bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
}
//===----------------------------------------------------------------------===//
//                        SwitchInst Implementation
//===----------------------------------------------------------------------===//

void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
  assert(Value && Default && NumReserved);
  ReservedSpace = NumReserved;
  setNumHungOffUseOperands(2);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Value;
  Op<1>() = Default;
}
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertBefore) {
  init(Value, Default, 2+NumCases*2);
}

/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor also autoinserts at the end of the specified BasicBlock.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertAtEnd) {
  init(Value, Default, 2+NumCases*2);
}

SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = SI.getOperandList();
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
    OL[i] = InOL[i];
    OL[i+1] = InOL[i+1];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}
4495 void SwitchInst::addCase(ConstantInt
*OnVal
, BasicBlock
*Dest
) {
4496 unsigned NewCaseIdx
= getNumCases();
4497 unsigned OpNo
= getNumOperands();
4498 if (OpNo
+2 > ReservedSpace
)
4499 growOperands(); // Get more space!
4500 // Initialize some new operands.
4501 assert(OpNo
+1 < ReservedSpace
&& "Growing didn't work!");
4502 setNumHungOffUseOperands(OpNo
+2);
4503 CaseHandle
Case(this, NewCaseIdx
);
4504 Case
.setValue(OnVal
);
4505 Case
.setSuccessor(Dest
);
4508 /// removeCase - This method removes the specified case and its successor
4509 /// from the switch instruction.
4510 SwitchInst::CaseIt
SwitchInst::removeCase(CaseIt I
) {
4511 unsigned idx
= I
->getCaseIndex();
4513 assert(2 + idx
*2 < getNumOperands() && "Case index out of range!!!");
4515 unsigned NumOps
= getNumOperands();
4516 Use
*OL
= getOperandList();
4518 // Overwrite this case with the end of the list.
4519 if (2 + (idx
+ 1) * 2 != NumOps
) {
4520 OL
[2 + idx
* 2] = OL
[NumOps
- 2];
4521 OL
[2 + idx
* 2 + 1] = OL
[NumOps
- 1];
4524 // Nuke the last value.
4525 OL
[NumOps
-2].set(nullptr);
4526 OL
[NumOps
-2+1].set(nullptr);
4527 setNumHungOffUseOperands(NumOps
-2);
4529 return CaseIt(this, idx
);
4532 /// growOperands - grow operands - This grows the operand list in response
4533 /// to a push_back style of operation. This grows the number of ops by 3 times.
4535 void SwitchInst::growOperands() {
4536 unsigned e
= getNumOperands();
4537 unsigned NumOps
= e
*3;
4539 ReservedSpace
= NumOps
;
4540 growHungoffUses(ReservedSpace
);
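// Typical call sequence (illustrative only; `Cond`, `I32Ty` and the blocks
// are assumed caller values):
//
//   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/2, BB);
//   SI->addCase(ConstantInt::get(I32Ty, 0), ZeroBB);
//   SI->addCase(ConstantInt::get(I32Ty, 1), OneBB);
//
// Reserving NumCases up front avoids growOperands() reallocations while the
// hung-off use list is filled in.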
MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
  assert(Changed && "called only if metadata has changed");

  if (!Weights)
    return nullptr;

  assert(SI.getNumSuccessors() == Weights->size() &&
         "num of prof branch_weights must accord with num of successors");

  bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });

  if (AllZeroes || Weights->size() < 2)
    return nullptr;

  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
}

void SwitchInstProfUpdateWrapper::init() {
  MDNode *ProfileData = getBranchWeightMDNode(SI);
  if (!ProfileData)
    return;

  if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
    llvm_unreachable("number of prof branch_weights metadata operands does "
                     "not correspond to number of successors");
  }

  SmallVector<uint32_t, 8> Weights;
  if (!extractBranchWeights(ProfileData, Weights))
    return;
  this->Weights = std::move(Weights);
}
SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with the way SwitchInst::removeCase() removes
    // the cases in SwitchInst::removeCase(CaseIt).
    (*Weights)[I->getCaseIndex() + 1] = Weights->back();
    Weights->pop_back();
  }
  return SI.removeCase(I);
}

void SwitchInstProfUpdateWrapper::addCase(
    ConstantInt *OnVal, BasicBlock *Dest,
    SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  SI.addCase(OnVal, Dest);

  if (!Weights && W && *W) {
    Changed = true;
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
    (*Weights)[SI.getNumSuccessors() - 1] = *W;
  } else if (Weights) {
    Changed = true;
    Weights->push_back(W.value_or(0));
  }
  if (Weights)
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
}

Instruction::InstListType::iterator
SwitchInstProfUpdateWrapper::eraseFromParent() {
  // Instruction is erased. Mark as unchanged to not touch it in the destructor.
  Changed = false;
  if (Weights)
    Weights->resize(0);
  return SI.eraseFromParent();
}

SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
  if (!Weights)
    return std::nullopt;
  return (*Weights)[idx];
}

void SwitchInstProfUpdateWrapper::setSuccessorWeight(
    unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  if (!W)
    return;

  if (!Weights && *W)
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);

  if (Weights) {
    auto &OldW = (*Weights)[idx];
    if (*W != OldW) {
      Changed = true;
      OldW = *W;
    }
  }
}

SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
                                                unsigned idx) {
  if (MDNode *ProfileData = getBranchWeightMDNode(SI))
    if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
      return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
          ->getValue()
          .getZExtValue();

  return std::nullopt;
}
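// The wrapper keeps !prof branch_weights metadata in sync with case edits so
// callers don't have to touch the MDNode directly (illustrative only; `SI`,
// `I32Ty` and `TwoBB` are assumed caller values):
//
//   SwitchInstProfUpdateWrapper SIW(*SI);
//   SIW.addCase(ConstantInt::get(I32Ty, 2), TwoBB, /*W=*/uint32_t(10));
//   // The weights vector grows with the case and is re-emitted as metadata
//   // when the wrapper is destroyed.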
//===----------------------------------------------------------------------===//
//                        IndirectBrInst Implementation
//===----------------------------------------------------------------------===//

void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  ReservedSpace = 1+NumDests;
  setNumHungOffUseOperands(1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Address;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 2 times.
///
void IndirectBrInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*2;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
    : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
                  nullptr, IBI.getNumOperands()) {
  allocHungoffUses(IBI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = IBI.getOperandList();
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
    OL[i] = InOL[i];
  SubclassOptionalData = IBI.SubclassOptionalData;
}
/// addDestination - Add a destination.
///
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
  unsigned OpNo = getNumOperands();
  if (OpNo+1 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+1);
  getOperandList()[OpNo] = DestBB;
}

/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.
void IndirectBrInst::removeDestination(unsigned idx) {
  assert(idx < getNumOperands()-1 && "Successor index out of range!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Replace this value with the last one.
  OL[idx+1] = OL[NumOps-1];

  // Nuke the last value.
  OL[NumOps-1].set(nullptr);
  setNumHungOffUseOperands(NumOps-1);
}
//===----------------------------------------------------------------------===//
//                           FreezeInst Implementation
//===----------------------------------------------------------------------===//

FreezeInst::FreezeInst(Value *S,
                       const Twine &Name, Instruction *InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}

FreezeInst::FreezeInst(Value *S,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
  setName(Name);
}
//===----------------------------------------------------------------------===//
//                        cloneImpl() implementations
//===----------------------------------------------------------------------===//

// Define these methods here so vtables don't get emitted into every translation
// unit that uses these classes.

GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  return new (getNumOperands()) GetElementPtrInst(*this);
}

UnaryOperator *UnaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>());
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>(), Op<1>());
}

FCmpInst *FCmpInst::cloneImpl() const {
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}

AllocaInst *AllocaInst::cloneImpl() const {
  AllocaInst *Result = new AllocaInst(getAllocatedType(), getAddressSpace(),
                                      getOperand(0), getAlign());
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
  Result->setSwiftError(isSwiftError());
  return Result;
}

LoadInst *LoadInst::cloneImpl() const {
  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
                      getAlign(), getOrdering(), getSyncScopeID());
}

StoreInst *StoreInst::cloneImpl() const {
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
                       getOrdering(), getSyncScopeID());
}

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
      getOperand(0), getOperand(1), getOperand(2), getAlign(),
      getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}

AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  AtomicRMWInst *Result =
      new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                        getAlign(), getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}

CallInst *CallInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) CallInst(*this);
  }
  return new(getNumOperands()) CallInst(*this);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());
}

PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

ReturnInst *ReturnInst::cloneImpl() const {
  return new(getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new(getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}

InvokeInst *InvokeInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
  }
  return new(getNumOperands()) InvokeInst(*this);
}

CallBrInst *CallBrInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
  }
  return new (getNumOperands()) CallBrInst(*this);
}

ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (getNumOperands()) CleanupReturnInst(*this);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (getNumOperands()) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}

FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (getNumOperands()) FuncletPadInst(*this);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}

FreezeInst *FreezeInst::cloneImpl() const {
  return new FreezeInst(getOperand(0));
}