//===- Instructions.cpp - Implement the LLVM instructions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using namespace llvm;

static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));

//===----------------------------------------------------------------------===//
//                            AllocaInst Class
//===----------------------------------------------------------------------===//

std::optional<TypeSize>
AllocaInst::getAllocationSize(const DataLayout &DL) const {
  TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
  if (isArrayAllocation()) {
    auto *C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return std::nullopt;
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
    Size *= C->getZExtValue();
  }
  return Size;
}

std::optional<TypeSize>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  std::optional<TypeSize> Size = getAllocationSize(DL);
  if (Size)
    return *Size * 8;
  return std::nullopt;
}

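// Usage sketch (illustrative, not part of the original file): querying the
// static footprint of an alloca. Assumes an AllocaInst *AI and the module's
// DataLayout DL are in scope.
//
//   if (std::optional<TypeSize> Size = AI->getAllocationSize(DL))
//     if (!Size->isScalable())
//       uint64_t Bytes = Size->getFixedValue(); // size known at compile time
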
//===----------------------------------------------------------------------===//
//                              SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getElementCount() != VT->getElementCount())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}

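// Usage sketch (illustrative, not part of the original file): validating
// operands before constructing a select. Assumes Cond, TVal, FVal (Value *)
// and InsertPt (Instruction *) are in scope.
//
//   if (const char *Err = SelectInst::areInvalidOperands(Cond, TVal, FVal))
//     report_fatal_error(Twine("invalid select operands: ") + Err);
//   SelectInst *Sel = SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);
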
//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  copyIncomingBlocks(drop_begin(blocks(), Idx + 1), Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

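// Usage sketch (illustrative, not part of the original file): removing the
// incoming value for a predecessor block that is being deleted. Assumes a
// PHINode *PN and a BasicBlock *DeadBB.
//
//   int Idx = PN->getBasicBlockIndex(DeadBB);
//   if (Idx >= 0)
//     PN->removeIncomingValue(Idx, /*DeletePHIIfEmpty=*/true);
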
void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
                                    bool DeletePHIIfEmpty) {
  SmallDenseSet<unsigned> RemoveIndices;
  for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
    if (Predicate(Idx))
      RemoveIndices.insert(Idx);

  if (RemoveIndices.empty())
    return;

  // Remove operands.
  auto NewOpEnd = remove_if(operands(), [&](Use &U) {
    return RemoveIndices.contains(U.getOperandNo());
  });
  for (Use &U : make_range(NewOpEnd, op_end()))
    U.set(nullptr);

  // Remove incoming blocks.
  (void)std::remove_if(const_cast<block_iterator>(block_begin()),
                       const_cast<block_iterator>(block_end()),
                       [&](BasicBlock *&BB) {
                         return RemoveIndices.contains(&BB - block_begin());
                       });

  setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 1.5
/// times.
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}

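// Usage sketch (illustrative, not part of the original file): folding a
// trivial PHI whose incoming values all agree, as SSA-construction utilities
// commonly do. Assumes a PHINode *PN whose unique incoming value dominates it.
//
//   if (Value *V = PN->hasConstantValue()) {
//     PN->replaceAllUsesWith(V);
//     PN->eraseFromParent();
//   }
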
/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
//                       LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

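// Usage sketch (illustrative, not part of the original file): building a
// cleanup landingpad. LPadTy (the personality's {ptr, i32} result type),
// BasicBlock *LPadBB, and the type-info global SomeTypeInfoGV are all
// hypothetical names assumed to be in scope; the enclosing function's
// personality must already be set.
//
//   LandingPadInst *LP = LandingPadInst::Create(
//       LPadTy, /*NumReservedClauses=*/1, "lpad", /*InsertAtEnd=*/LPadBB);
//   LP->setCleanup(true);           // run cleanups when unwinding
//   LP->addClause(SomeTypeInfoGV);  // optional catch clause
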
//===----------------------------------------------------------------------===//
//                        CallBase Implementation
//===----------------------------------------------------------------------===//

CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           Instruction *InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}

CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
                           Instruction *InsertPt) {
  SmallVector<OperandBundleDef, 2> OpDefs;
  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
    auto ChildOB = CI->getOperandBundleAt(i);
    if (ChildOB.getTagName() != OpB.getTag())
      OpDefs.emplace_back(ChildOB);
  }
  OpDefs.emplace_back(OpB);
  return CallBase::Create(CI, OpDefs, InsertPt);
}

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledOperand();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  return !isInlineAsm();
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}

FPClassTest CallBase::getRetNoFPClass() const {
  FPClassTest Mask = Attrs.getRetNoFPClass();

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getRetNoFPClass();
  return Mask;
}

FPClassTest CallBase::getParamNoFPClass(unsigned i) const {
  FPClassTest Mask = Attrs.getParamNoFPClass(i);

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getParamNoFPClass(i);
  return Mask;
}

bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getRetDereferenceableBytes() > 0 &&
      !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
    return true;

  return false;
}

Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Kind, &Index))
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  const Function *F = getCalledFunction();
  if (!F)
    return false;

  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles.
  switch (Kind) {
  case Attribute::ReadNone:
    return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
  case Attribute::ReadOnly:
    return !hasClobberingOperandBundles();
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}

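// Usage sketch (illustrative, not part of the original file): testing whether
// the first argument is known nonnull at a call site, taking both call-site
// and callee attributes into account. Assumes a CallBase *CB.
//
//   if (CB->arg_size() > 0 && CB->paramHasAttr(0, Attribute::NonNull))
//     ; // argument 0 cannot be null here
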
bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}

template Attribute
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;

void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}

CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}

CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, do a simple linear search. Otherwise,
  /// fall back to a binary search that exploits the fact that bundles usually
  /// have a similar number of arguments, to converge faster.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need a fractional number below, and to avoid floating point we use an
  /// integral value multiplied by this scaling constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current = Begin;

  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");

  return *Current;
}

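// Worked example of the interpolation probe above (illustrative, not from the
// original file): with 16 bundle infos covering operand indices [100, 180),
// ScaledOperandPerBundle = 1024 * 80 / 16 = 5120. Looking up OpIdx = 140
// probes Begin + (40 * 1024) / 5120 = Begin + 8, i.e. straight into the
// middle of the range; if that bundle misses, [Begin, End) is narrowed and
// the probe repeats like an ordinary binary search.
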
CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
                                     OperandBundleDef OB,
                                     Instruction *InsertPt) {
  if (CB->getOperandBundle(ID))
    return CB;

  SmallVector<OperandBundleDef, 1> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.push_back(OB);
  return Create(CB, Bundles, InsertPt);
}

CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
                                        Instruction *InsertPt) {
  SmallVector<OperandBundleDef, 1> Bundles;
  bool CreateNew = false;

  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CB->getOperandBundleAt(I);
    if (Bundle.getTagID() == ID) {
      CreateNew = true;
      continue;
    }
    Bundles.emplace_back(Bundle);
  }

  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
}

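// Usage sketch (illustrative, not part of the original file): attaching a
// "funclet" bundle to a call. Both helpers return a *new* instruction when
// anything changes, so the caller must splice out the old one. Assumes a
// CallBase *CB and a Value *PadVal for the enclosing funclet pad.
//
//   OperandBundleDef OB("funclet", std::vector<Value *>{PadVal});
//   CallBase *NewCB =
//       CallBase::addOperandBundle(CB, LLVMContext::OB_funclet, OB, CB);
//   if (NewCB != CB) {
//     CB->replaceAllUsesWith(NewCB);
//     CB->eraseFromParent();
//   }
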
bool CallBase::hasReadingOperandBundles() const {
  // Implementation note: this is a conservative implementation of operand
  // bundle semantics, where *any* non-assume operand bundle (other than
  // ptrauth and kcfi) forces a callsite to be at least readonly.
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

bool CallBase::hasClobberingOperandBundles() const {
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

MemoryEffects CallBase::getMemoryEffects() const {
  MemoryEffects ME = getAttributes().getMemoryEffects();
  if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
    MemoryEffects FnME = Fn->getMemoryEffects();
    if (hasOperandBundles()) {
      // TODO: Add a method to get memory effects for operand bundles instead.
      if (hasReadingOperandBundles())
        FnME |= MemoryEffects::readOnly();
      if (hasClobberingOperandBundles())
        FnME |= MemoryEffects::writeOnly();
    }
    ME &= FnME;
  }
  return ME;
}

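// Usage sketch (illustrative, not part of the original file): this combined
// call-site/callee/bundle view is the query passes should prefer over raw
// attributes. Assumes a CallBase *CB.
//
//   MemoryEffects ME = CB->getMemoryEffects();
//   if (ME.doesNotAccessMemory())
//     ; // the call is effectively readnone
//   else if (ME.onlyReadsMemory())
//     ; // no visible stores can originate from this call
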
void CallBase::setMemoryEffects(MemoryEffects ME) {
  addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
}

/// Determine if the function does not access memory.
bool CallBase::doesNotAccessMemory() const {
  return getMemoryEffects().doesNotAccessMemory();
}
void CallBase::setDoesNotAccessMemory() {
  setMemoryEffects(MemoryEffects::none());
}

/// Determine if the function does not access or only reads memory.
bool CallBase::onlyReadsMemory() const {
  return getMemoryEffects().onlyReadsMemory();
}
void CallBase::setOnlyReadsMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
}

/// Determine if the function does not access or only writes memory.
bool CallBase::onlyWritesMemory() const {
  return getMemoryEffects().onlyWritesMemory();
}
void CallBase::setOnlyWritesMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
}

/// Determine if the call can access memory only using pointers based
/// on its arguments.
bool CallBase::onlyAccessesArgMemory() const {
  return getMemoryEffects().onlyAccessesArgPointees();
}
void CallBase::setOnlyAccessesArgMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
}

/// Determine if the function may only access memory that is
/// inaccessible from the IR.
bool CallBase::onlyAccessesInaccessibleMemory() const {
  return getMemoryEffects().onlyAccessesInaccessibleMem();
}
void CallBase::setOnlyAccessesInaccessibleMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
}

/// Determine if the function may only access memory that is
/// either inaccessible from the IR or pointed to by its arguments.
bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
  return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
}
void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
  setMemoryEffects(getMemoryEffects() &
                   MemoryEffects::inaccessibleOrArgMemOnly());
}

//===----------------------------------------------------------------------===//
//                        CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update profile weight for call instruction by scaling it using the ratio
// of S/T. The meaning of "branch_weights" metadata for a call instruction is
// transferred to represent call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;

  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;

  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }

  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0));
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights") &&
      ProfileData->getNumOperands() > 0) {
    // Using APInt::div may be expensive, but most cases should fit 64 bits.
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
                       ->getValue()
                       .getZExtValue());
    Val *= APS;
    Vals.push_back(MDB.createConstant(
        ConstantInt::get(Type::getInt32Ty(getContext()),
                         Val.udiv(APT).getLimitedValue(UINT32_MAX))));
  } else if (ProfDataName->getString().equals("VP"))
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      // The first value is the key of the value profile, which will not change.
      Vals.push_back(ProfileData->getOperand(i));
      uint64_t Count =
          mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
              ->getValue()
              .getZExtValue();
      // Don't scale the magic number.
      if (Count == NOMORE_ICP_MAGICNUM) {
        Vals.push_back(ProfileData->getOperand(i + 1));
        continue;
      }
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
      APInt Val(128, Count);
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}

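// Usage sketch (illustrative, not part of the original file): after cloning a
// call into a specialized path expected to receive a quarter of the original
// entry count, the profile counts can be rescaled by S/T = 1/4:
//
//   ClonedCall->updateProfWeight(/*S=*/1, /*T=*/4);
//
// Both "branch_weights" (interpreted as call counts here) and "VP" value
// profiles are multiplied by S and divided by T.
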
//===----------------------------------------------------------------------===//
//                        InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

//===----------------------------------------------------------------------===//
//                        CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  std::copy(Args.begin(), Args.end(), op_begin());
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

//===----------------------------------------------------------------------===//
//                        ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
//                        ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
//                     CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
//                      CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
//                      CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
//                       FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
//                      UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
//                        BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}

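// Usage sketch (illustrative, not part of the original file): inverting a
// conditional branch. Swapping successors alone would change semantics, so
// the condition is inverted as well. Assumes a conditional BranchInst *BI
// and an IRBuilder<> Builder positioned at BI.
//
//   BI->setCondition(Builder.CreateNot(BI->getCondition()));
//   BI->swapSuccessors(); // also swaps "branch_weights" via swapProfMetadata()
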
//===----------------------------------------------------------------------===//
//                        AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeAllocaDefaultAlign(Ty, I->getParent());
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
                 InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}

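// Usage sketch (illustrative, not part of the original file): passes commonly
// gate stack transformations on this predicate. Assumes an AllocaInst *AI.
//
//   if (AI->isStaticAlloca())
//     ; // folded into the frame: no dynamic stack adjustment at runtime
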
//===----------------------------------------------------------------------===//
//                          LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
}

static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getABITypeAlign(Ty);
}

static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeLoadStoreDefaultAlign(Ty, I->getParent());
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

//===----------------------------------------------------------------------===//
//                          StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
}

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
                InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), &*InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this)) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  insertBefore(*InsertBefore->getParent(), InsertBefore);
  AssertOK();
}

//===----------------------------------------------------------------------===//
//                      AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

//===----------------------------------------------------------------------===//
//                       AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "atomicrmw instructions can only be atomic.");
  assert(Ordering != AtomicOrdering::Unordered &&
         "atomicrmw instructions cannot be unordered.");
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}

StringRef AtomicRMWInst::getOperationName(BinOp Op) {
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
  case AtomicRMWInst::Nand:
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
  case AtomicRMWInst::UMax:
    return "umax";
  case AtomicRMWInst::UMin:
    return "umin";
  case AtomicRMWInst::FAdd:
    return "fadd";
  case AtomicRMWInst::FSub:
    return "fsub";
  case AtomicRMWInst::FMax:
    return "fmax";
  case AtomicRMWInst::FMin:
    return "fmin";
  case AtomicRMWInst::UIncWrap:
    return "uinc_wrap";
  case AtomicRMWInst::UDecWrap:
    return "udec_wrap";
  case AtomicRMWInst::BAD_BINOP:
    return "<invalid operation>";
  }
  llvm_unreachable("invalid atomicrmw operation");
}
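
// Brief usage sketch (illustrative, not part of LLVM): the name returned
// above is the textual IR keyword for the atomicrmw operation.
[[maybe_unused]] static StringRef exampleRMWOperationName() {
  return AtomicRMWInst::getOperationName(AtomicRMWInst::Add); // "add"
}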
//===----------------------------------------------------------------------===//
//                       FenceInst Implementation
//===----------------------------------------------------------------------===//

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
//===----------------------------------------------------------------------===//
//                       GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//

void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}

GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}

Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (!Struct->indexValid(Idx))
      return nullptr;
    return Struct->getTypeAtIndex(Idx);
  }
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
    if (!Ty)
      return Ty;
  }
  return Ty;
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
bool GetElementPtrInst::hasAllZeroIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
      if (!CI->isZero()) return false;
    } else {
      return false;
    }
  }
  return true;
}

/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
bool GetElementPtrInst::hasAllConstantIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (!isa<ConstantInt>(getOperand(i)))
      return false;
  }
  return true;
}

void GetElementPtrInst::setIsInBounds(bool B) {
  cast<GEPOperator>(this)->setIsInBounds(B);
}

bool GetElementPtrInst::isInBounds() const {
  return cast<GEPOperator>(this)->isInBounds();
}

bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

bool GetElementPtrInst::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
                                                ConstantOffset);
}
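
// Illustrative sketch (not part of LLVM): computing a GEP's constant byte
// offset via the delegation above; the GEP and DataLayout are assumed inputs.
[[maybe_unused]] static std::optional<int64_t>
exampleGEPByteOffset(const GetElementPtrInst *GEP, const DataLayout &DL) {
  APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
  if (GEP->accumulateConstantOffset(DL, Offset))
    return Offset.getSExtValue();
  return std::nullopt; // Not all indices were constant.
}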
//===----------------------------------------------------------------------===//
//                       ExtractElementInst Implementation
//===----------------------------------------------------------------------===//

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
    : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                  ExtractElement,
                  OperandTraits<ExtractElementInst>::op_begin(this),
                  2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
    : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                  ExtractElement,
                  OperandTraits<ExtractElementInst>::op_begin(this),
                  2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
    return false;
  return true;
}
//===----------------------------------------------------------------------===//
//                       InsertElementInst Implementation
//===----------------------------------------------------------------------===//

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
    : Instruction(Vec->getType(), InsertElement,
                  OperandTraits<InsertElementInst>::op_begin(this),
                  3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
    : Instruction(Vec->getType(), InsertElement,
                  OperandTraits<InsertElementInst>::op_begin(this),
                  3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
                                        const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false; // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false; // Second operand of insertelement must be vector element type.

  if (!Index->getType()->isIntegerTy())
    return false; // Third operand of insertelement must be an integer.
  return true;
}
//===----------------------------------------------------------------------===//
//                      ShuffleVectorInst Implementation
//===----------------------------------------------------------------------===//

static Value *createPlaceholderForShuffleVector(Value *V) {
  assert(V && "Cannot create placeholder of nullptr V");
  return PoisonValue::get(V->getType());
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     Instruction *InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertBefore) {}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     BasicBlock *InsertAtEnd)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertAtEnd) {}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertBefore) {}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertAtEnd) {}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
void ShuffleVectorInst::commute() {
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(i);
    if (MaskElt == PoisonMaskElem) {
      NewMask[i] = PoisonMaskElem;
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  }
  setShuffleMask(NewMask);
  Op<0>().swap(Op<1>());
}
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        ArrayRef<int> Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
    return false;

  // Make sure the mask elements make sense.
  int V1Size =
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
    if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
      return false;

  if (isa<ScalableVectorType>(V1->getType()))
    if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
      return false;

  return true;
}
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32, and must be the same kind of vector as the
  // input vectors.
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
    return false;

  // Check to see if Mask is valid.
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size * 2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size * 2)
        return false;
    return true;
  }

  return false;
}
void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();

  if (isa<ConstantAggregateZero>(Mask)) {
    Result.resize(EC.getKnownMinValue(), 0);
    return;
  }

  Result.reserve(EC.getKnownMinValue());

  if (EC.isScalable()) {
    assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
           "Scalable vector shuffle mask must be undef or zeroinitializer");
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
    for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
      Result.emplace_back(MaskVal);
    return;
  }

  unsigned NumElts = EC.getKnownMinValue();

  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}
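
// Illustrative sketch (not part of LLVM): decoding a constant shuffle mask
// into integers with the routine above; -1 entries denote undef lanes.
[[maybe_unused]] static SmallVector<int, 16>
exampleDecodeShuffleMask(const Constant *MaskConstant) {
  SmallVector<int, 16> Elts;
  ShuffleVectorInst::getShuffleMask(MaskConstant, Elts);
  return Elts;
}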
void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
  ShuffleMask.assign(Mask.begin(), Mask.end());
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
}

Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                          Type *ResultTy) {
  Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
  if (isa<ScalableVectorType>(ResultTy)) {
    assert(all_equal(Mask) && "Unexpected shuffle");
    Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
    if (Mask[0] == 0)
      return Constant::getNullValue(VecTy);
    return UndefValue::get(VecTy);
  }
  SmallVector<Constant *, 16> MaskConst;
  for (int Elem : Mask) {
    if (Elem == PoisonMaskElem)
      MaskConst.push_back(PoisonValue::get(Int32Ty));
    else
      MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
  }
  return ConstantVector::get(MaskConst);
}
static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  assert(!Mask.empty() && "Shuffle mask must contain elements");
  bool UsesLHS = false;
  bool UsesRHS = false;
  for (int I : Mask) {
    if (I == -1)
      continue;
    assert(I >= 0 && I < (NumOpElts * 2) &&
           "Out-of-bounds shuffle mask element");
    UsesLHS |= (I < NumOpElts);
    UsesRHS |= (I >= NumOpElts);
    if (UsesLHS && UsesRHS)
      return false;
  }
  // Allow for degenerate case: completely undef mask means neither source is
  // used.
  return UsesLHS || UsesRHS;
}

bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, NumSrcElts);
}
static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  if (!isSingleSourceMaskImpl(Mask, NumOpElts))
    return false;
  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != i && Mask[i] != (NumOpElts + i))
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, NumSrcElts);
}
bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;

  // The number of elements in the mask must be at least 2.
  if (NumSrcElts < 2)
    return false;

  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != (NumSrcElts - 1 - I) &&
        Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
      return false;
  }
  return true;
}
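
// Worked example (illustrative, not part of LLVM): with 4 source elements,
// the mask <3,2,1,0> reverses the first operand.
[[maybe_unused]] static bool exampleReverseMaskCheck() {
  const int Rev[] = {3, 2, 1, 0};
  return ShuffleVectorInst::isReverseMask(Rev, 4); // true
}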
bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  if (!isSingleSourceMask(Mask, NumSrcElts))
    return false;
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != 0 && Mask[I] != NumSrcElts)
      return false;
  }
  return true;
}
bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask, int NumSrcElts) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Select is differentiated from identity. It requires using both sources.
  if (isSingleSourceMask(Mask, NumSrcElts))
    return false;
  for (int I = 0, E = Mask.size(); I < E; ++I) {
    if (Mask[I] == -1)
      continue;
    if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
      return false;
  }
  return true;
}
bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask, int NumSrcElts) {
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;

  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int Sz = Mask.size();
  if (Sz < 2 || !isPowerOf2_32(Sz))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumSrcElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  for (int I = 2; I < Sz; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[I - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}
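
// Worked example (illustrative, not part of LLVM): with 4 elements per
// source, <0,4,2,6> interleaves the even lanes of both sources and is
// accepted as a transpose mask.
[[maybe_unused]] static bool exampleTransposeMaskCheck() {
  const int Trn1[] = {0, 4, 2, 6};
  return ShuffleVectorInst::isTransposeMask(Trn1, 4); // true
}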
bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index) {
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
    return false;
  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  int StartIndex = -1;
  for (int I = 0, E = Mask.size(); I != E; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      continue;

    if (StartIndex == -1) {
      // Don't support a StartIndex that begins in the second input, or if the
      // first non-undef index would access below the StartIndex.
      if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
        return false;

      StartIndex = MaskEltVal - I;
      continue;
    }

    // Splice is sequential starting from StartIndex.
    if (MaskEltVal != (StartIndex + I))
      return false;
  }

  if (StartIndex == -1)
    return false;

  // NOTE: This accepts StartIndex == 0 (COPY).
  Index = StartIndex;
  return true;
}
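
// Worked example (illustrative, not part of LLVM): over two 4-element
// sources, <1,2,3,4> reads sequentially starting at element 1, so it is a
// splice with Index == 1.
[[maybe_unused]] static bool exampleSpliceMaskCheck() {
  const int Splice[] = {1, 2, 3, 4};
  int Index = -1;
  return ShuffleVectorInst::isSpliceMask(Splice, 4, Index); // true, Index == 1
}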
bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
                                               int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}
bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
                                              int NumSrcElts, int &NumSubElts,
                                              int &Index) {
  int NumMaskElts = Mask.size();

  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;

  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true;
  bool Src1Identity = true;

  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements are in place within their
  // own span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements are in place within their
  // own span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}
bool ShuffleVectorInst::isIdentityWithPadding() const {
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  ArrayRef<int> Mask = getShuffleMask();
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}
bool ShuffleVectorInst::isIdentityWithExtract() const {
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}
bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}
static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
                                        int ReplicationFactor, int VF) {
  assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
         "Unexpected mask size.");

  for (int CurrElt : seq(VF)) {
    ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
    assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
           "Run out of mask?");
    Mask = Mask.drop_front(ReplicationFactor);
    if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
          return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
        }))
      return false;
  }
  assert(Mask.empty() && "Did not consume the whole mask?");

  return true;
}
bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
                                          int &ReplicationFactor, int &VF) {
  // The undef-less case is trivial.
  if (!llvm::is_contained(Mask, PoisonMaskElem)) {
    ReplicationFactor =
        Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
    if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
      return false;
    VF = Mask.size() / ReplicationFactor;
    return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
  }

  // However, if the mask contains undefs, we have to enumerate possible tuples
  // and pick one. There are bounds on replication factor: [1, mask size]
  // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle).
  // Additionally, mask size is a replication factor multiplied by vector size,
  // which further significantly reduces the search space.

  // Before doing that, let's perform basic correctness checking first.
  int Largest = -1;
  for (int MaskElt : Mask) {
    if (MaskElt == PoisonMaskElem)
      continue;
    // Elements must be in non-decreasing order.
    if (MaskElt < Largest)
      return false;
    Largest = std::max(Largest, MaskElt);
  }

  // Prefer larger replication factor if all else equal.
  for (int PossibleReplicationFactor :
       reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
    if (Mask.size() % PossibleReplicationFactor != 0)
      continue;
    int PossibleVF = Mask.size() / PossibleReplicationFactor;
    if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
                                     PossibleVF))
      continue;
    ReplicationFactor = PossibleReplicationFactor;
    VF = PossibleVF;
    return true;
  }

  return false;
}
bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
                                          int &VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  if (ShuffleMask.size() % VF != 0)
    return false;
  ReplicationFactor = ShuffleMask.size() / VF;

  return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
}
bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
  if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
      Mask.size() % VF != 0)
    return false;
  for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
    ArrayRef<int> SubMask = Mask.slice(K, VF);
    if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
      continue;
    SmallBitVector Used(VF, false);
    for (int Idx : SubMask) {
      if (Idx != PoisonMaskElem && Idx < VF)
        Used.set(Idx);
    }
    if (!Used.all())
      return false;
  }
  return true;
}
/// Return true if this shuffle mask is a one-use-single-source mask.
bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;
  if (!isSingleSourceMask(ShuffleMask, VF))
    return false;

  return isOneUseSingleSourceMask(ShuffleMask, VF);
}
bool ShuffleVectorInst::isInterleave(unsigned Factor) {
  FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
  // shuffle_vector can only interleave fixed length vectors - for scalable
  // vectors, see the @llvm.experimental.vector.interleave2 intrinsic.
  if (!OpTy)
    return false;
  unsigned OpNumElts = OpTy->getNumElements();

  return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
}
bool ShuffleVectorInst::isInterleaveMask(
    ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
    SmallVectorImpl<unsigned> &StartIndexes) {
  unsigned NumElts = Mask.size();
  if (NumElts % Factor)
    return false;

  unsigned LaneLen = NumElts / Factor;
  if (!isPowerOf2_32(LaneLen))
    return false;

  StartIndexes.resize(Factor);

  // Check whether each element matches the general interleaved rule.
  // Ignore undef elements, as long as the defined elements match the rule.
  // Outer loop processes all factors (x, y, z in the above example)
  unsigned I = 0, J;
  for (; I < Factor; I++) {
    unsigned SavedLaneValue;
    unsigned SavedNoUndefs = 0;

    // Inner loop processes consecutive accesses (x, x+1... in the example)
    for (J = 0; J < LaneLen - 1; J++) {
      // Lane computes x's position in the Mask
      unsigned Lane = J * Factor + I;
      unsigned NextLane = Lane + Factor;
      int LaneValue = Mask[Lane];
      int NextLaneValue = Mask[NextLane];

      // If both are defined, values must be sequential
      if (LaneValue >= 0 && NextLaneValue >= 0 &&
          LaneValue + 1 != NextLaneValue)
        break;

      // If the next value is undef, save the current one as reference
      if (LaneValue >= 0 && NextLaneValue < 0) {
        SavedLaneValue = LaneValue;
        SavedNoUndefs = 1;
      }

      // Undefs are allowed, but defined elements must still be consecutive:
      // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
      // Verify this by storing the last non-undef followed by an undef
      // Check that following non-undef masks are incremented with the
      // corresponding distance.
      if (SavedNoUndefs > 0 && LaneValue < 0) {
        SavedNoUndefs++;
        if (NextLaneValue >= 0 &&
            SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
          break;
      }
    }

    if (J < LaneLen - 1)
      return false;

    int StartMask = 0;
    if (Mask[I] >= 0) {
      // Check that the start of the I range (J=0) is greater than 0
      StartMask = Mask[I];
    } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
      // StartMask defined by the last value in lane
      StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
    } else if (SavedNoUndefs > 0) {
      // StartMask defined by some non-zero value in the j loop
      StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
    }
    // else StartMask remains set to 0, i.e. all elements are undefs

    if (StartMask < 0)
      return false;
    // We must stay within the vectors; This case can happen with undefs.
    if (StartMask + LaneLen > NumInputElts)
      return false;

    StartIndexes[I] = StartMask;
  }

  return true;
}
/// Try to lower a vector shuffle as a bit rotation.
///
/// Look for a repeated rotation pattern in each sub group.
/// Returns an element-wise left bit rotation amount or -1 if failed.
static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
  int NumElts = Mask.size();
  assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");

  int RotateAmt = -1;
  for (int i = 0; i != NumElts; i += NumSubElts) {
    for (int j = 0; j != NumSubElts; ++j) {
      int M = Mask[i + j];
      if (M < 0)
        continue;
      if (M < i || M >= i + NumSubElts)
        return -1;
      int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
      if (0 <= RotateAmt && Offset != RotateAmt)
        return -1;
      RotateAmt = Offset;
    }
  }
  return RotateAmt;
}
bool ShuffleVectorInst::isBitRotateMask(
    ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
    unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
  for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
    int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
    if (EltRotateAmt < 0)
      continue;
    RotateAmt = EltRotateAmt * EltSizeInBits;
    return true;
  }

  return false;
}
//===----------------------------------------------------------------------===//
//                             InsertValueInst Class
//===----------------------------------------------------------------------===//

void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue,
                  OperandTraits<InsertValueInst>::op_begin(this), 2),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}
//===----------------------------------------------------------------------===//
//                             ExtractValueInst Class
//===----------------------------------------------------------------------===//

void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
      Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}

// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// aggregate type.
//
Type *ExtractValueInst::getIndexedType(Type *Agg,
                                       ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
      Agg = AT->getElementType();
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
      Agg = ST->getElementType(Index);
    } else {
      // Not a valid type to index into.
      return nullptr;
    }
  }
  return const_cast<Type*>(Agg);
}
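
// Illustrative sketch (not part of LLVM): querying the type reached by the
// extractvalue indices {1, 0}. For a hypothetical aggregate {i32, {i8, i64}}
// this would return i8; the aggregate type is an assumed input.
[[maybe_unused]] static Type *exampleIndexedType(Type *AggTy) {
  const unsigned Idxs[] = {1, 0};
  return ExtractValueInst::getIndexedType(AggTy, Idxs);
}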
//===----------------------------------------------------------------------===//
//                             UnaryOperator Class
//===----------------------------------------------------------------------===//

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
    : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
    : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     Instruction *InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}

UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     BasicBlock *InsertAtEnd) {
  UnaryOperator *Res = Create(Op, S, Name);
  Res->insertInto(InsertAtEnd, InsertAtEnd->end());
  return Res;
}

void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
//===----------------------------------------------------------------------===//
//                             BinaryOperator Class
//===----------------------------------------------------------------------===//

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
    : Instruction(Ty, iType,
                  OperandTraits<BinaryOperator>::op_begin(this),
                  OperandTraits<BinaryOperator>::operands(this),
                  InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
    : Instruction(Ty, iType,
                  OperandTraits<BinaryOperator>::op_begin(this),
                  OperandTraits<BinaryOperator>::operands(this),
                  InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  Res->insertInto(InsertAtEnd, InsertAtEnd->end());
  return Res;
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub,
                            Zero, Op,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub,
                            Zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
                            Op->getType(), Name, InsertAtEnd);
}

// Exchange the two operands to this instruction. This instruction is safe to
// use on any binary instruction and does not modify the semantics of the
// instruction. If the instruction is order-dependent (a subtraction, for
// example), it cannot be commuted: nothing is changed and true is returned.
bool BinaryOperator::swapOperands() {
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  return false;
}
//===----------------------------------------------------------------------===//
//                             FPMathOperator Class
//===----------------------------------------------------------------------===//

float FPMathOperator::getFPAccuracy() const {
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0f;
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}
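
// Illustrative sketch (not part of LLVM): reading the !fpmath accuracy off an
// arbitrary instruction; 0.0 means no accuracy requirement was attached.
[[maybe_unused]] static float exampleReadFPAccuracy(const Instruction *I) {
  if (const auto *FPOp = dyn_cast<FPMathOperator>(I))
    return FPOp->getFPAccuracy();
  return 0.0f;
}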
//===----------------------------------------------------------------------===//
//                                CastInst Class
//===----------------------------------------------------------------------===//

// Just determine if this cast only deals with integral->integral conversion.
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  case Instruction::BitCast:
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32     ; on 32-bit platforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target information may give a more accurate answer here.
    return false;
  case Instruction::BitCast:
    return true;  // BitCast never modifies bits.
  case Instruction::PtrToInt:
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
  }
}

bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}
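
// Illustrative sketch (not part of LLVM): a ptrtoint whose destination
// integer is exactly pointer-width is classified as a no-op by the predicate
// above. The types and DataLayout are assumed inputs and must already form a
// valid ptrtoint per the castIsValid precondition.
[[maybe_unused]] static bool examplePtrToIntIsNoop(const DataLayout &DL,
                                                   Type *PtrTy, Type *IntTy) {
  return CastInst::isNoopCast(Instruction::PtrToInt, PtrTy, IntTy, DL);
}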
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// * %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below. The rows correspond to firstOp, the columns
  // correspond to secondOp. In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a        FirstClass   n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc. We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP        +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is floating point.
      if (DstTy->isFloatingPointTy())
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 6:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is a floating point.
      if (SrcTy->isFloatingPointTy())
        return secondOp;
      return 0;
    case 7: {
      // Disable inttoptr/ptrtoint optimization if requested.
      if (DisableI2pP2iOpt)
        return 0;

      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
      // ext, trunc -> ext,     if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,   if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize == DstSize)
        return Instruction::BitCast;
      if (SrcSize < DstSize)
        return firstOp;
      if (SrcSize > DstSize)
        return secondOp;
      return 0;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast
      return Instruction::AddrSpaceCast;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}
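
// Worked example (illustrative, not part of LLVM): folding "zext i8 to i32"
// followed by "trunc i32 to i16". The table yields case 8 and, since the
// source is narrower than the destination, a single zext i8 -> i16 survives.
// No pointers are involved, so the IntPtr type arguments are null.
[[maybe_unused]] static unsigned exampleFoldZExtTrunc(LLVMContext &Ctx) {
  Type *I8 = Type::getInt8Ty(Ctx);
  Type *I16 = Type::getInt16Ty(Ctx);
  Type *I32 = Type::getInt32Ty(Ctx);
  return CastInst::isEliminableCastPair(Instruction::ZExt, Instruction::Trunc,
                                        I8, I32, I16, nullptr, nullptr,
                                        nullptr); // == Instruction::ZExt
}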
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
                           const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
                           const Twine &Name, BasicBlock *InsertAtEnd) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertAtEnd);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertAtEnd);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertAtEnd);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertAtEnd);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertAtEnd);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertAtEnd);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertAtEnd);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertAtEnd);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
}

CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
}

CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
                                         const Twine &Name,
                                         Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
                                         const Twine &Name,
                                         BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
}
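
// A sketch of the size-based dispatch in the *OrBitCast helpers above (Ctx,
// V and IP are assumed to be an LLVMContext, an i32 Value, and an insertion
// point defined elsewhere):
// \code
//   Type *I32 = Type::getInt32Ty(Ctx);
//   Type *I64 = Type::getInt64Ty(Ctx);
//   CastInst::CreateZExtOrBitCast(V, I32, "same", IP); // emits a bitcast
//   CastInst::CreateZExtOrBitCast(V, I64, "wide", IP); // emits a zext
// \endcode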

CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
}

/// Create a BitCast, AddrSpaceCast, or a PtrToInt cast instruction.
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      const Twine &Name,
                                      Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
    Value *S, Type *Ty,
    const Twine &Name,
    BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);

  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
}

CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
    Value *S, Type *Ty,
    const Twine &Name,
    Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}
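
// Illustrative sketch of the address-space dispatch above (Ctx, Ptr and IP
// are assumptions): casting a pointer to a different address space yields an
// addrspacecast, while a same-address-space cast degenerates to a bitcast:
// \code
//   PointerType *AS1Ptr = PointerType::get(Ctx, /*AddressSpace=*/1);
//   CastInst::CreatePointerBitCastOrAddrSpaceCast(Ptr, AS1Ptr, "as1", IP);
// \endcode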

CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
                                           const Twine &Name,
                                           Instruction *InsertBefore) {
  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
  if (S->getType()->isIntegerTy() && Ty->isPointerTy())
    return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
                                      bool isSigned, const Twine &Name,
                                      Instruction *InsertBefore) {
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
         "Invalid integer cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::Trunc :
      (isSigned ? Instruction::SExt : Instruction::ZExt)));
  return Create(opcode, C, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
                                      bool isSigned, const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
         "Invalid integer cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::Trunc :
      (isSigned ? Instruction::SExt : Instruction::ZExt)));
  return Create(opcode, C, Ty, Name, InsertAtEnd);
}
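
// A sketch of the opcode selection above (V8, I32Ty and IP are assumed):
// widening i8 -> i32 with isSigned=true picks SExt, narrowing i64 -> i32
// picks Trunc, and i32 -> i32 picks BitCast (a no-op):
// \code
//   CastInst::CreateIntegerCast(V8, I32Ty, /*isSigned=*/true, "s", IP);
// \endcode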

CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
                                 const Twine &Name,
                                 Instruction *InsertBefore) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
                                 const Twine &Name,
                                 BasicBlock *InsertAtEnd) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertAtEnd);
}

bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  if (SrcTy == DestTy)
    return true;

  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }
    }
  }

  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
    if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
      return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
    }
  }

  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Could still have vectors of pointers if the number of elements doesn't
  // match.
  if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
    return false;

  if (SrcBits != DestBits)
    return false;

  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
    return false;

  return true;
}

bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
                                          const DataLayout &DL) {
  // ptrtoint and inttoptr are not allowed on non-integral pointers
  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
    if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));
  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
    if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));

  return isBitCastable(SrcTy, DestTy);
}
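
// For instance (a sketch): with 64-bit pointers in the DataLayout, a
// ptr -> i64 conversion is a no-op pointer cast and this returns true, while
// ptr -> i32 returns false because the bit widths differ, and any cast
// involving a non-integral pointer type is rejected outright.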

// Provide a way to get a "cast" where the cast opcode is inferred from the
// types and size of the operand. This, basically, is a parallel of the
// logic in the castIsValid function below. This axiom should hold:
//   castIsValid( getCastOpcode(Val, Ty), Val, Ty)
// should not assert in castIsValid. In other words, this produces a "correct"
// casting opcode for the arguments passed to it.
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Find the appropriate opcode based on the
        // element types.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {                    // Casting to integral
    if (SrcTy->isIntegerTy()) {                   // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                             // int -> smaller int
      else if (DestBits > SrcBits) {              // it's an extension
        if (SrcIsSigned)
          return SExt;                            // signed -> SEXT
        else
          return ZExt;                            // unsigned -> ZEXT
      } else {
        return BitCast;                           // Same size, No-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {      // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                            // FP -> sint
      else
        return FPToUI;                            // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                             // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                            // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {       // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                   // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                            // sint -> FP
      else
        return UIToFP;                            // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {      // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                           // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                             // FP -> larger FP
      } else {
        return BitCast;                           // same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                             // same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                             // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                            // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  } else if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
      return BitCast;                             // 64-bit vector to MMX
    }
    llvm_unreachable("Illegal cast to X86_MMX");
  }
  llvm_unreachable("Casting to type that is not first-class");
}
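
// A quick illustration of the axiom above (a sketch; Ctx, V and the i32
// source type are assumptions): the inferred opcode always passes castIsValid.
// \code
//   Type *F64 = Type::getDoubleTy(Ctx);
//   Instruction::CastOps Opc =
//       CastInst::getCastOpcode(V, /*SrcIsSigned=*/false, F64,
//                               /*DestIsSigned=*/false); // i32 -> UIToFP
//   assert(CastInst::castIsValid(Opc, V, F64));
// \endcode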

//===----------------------------------------------------------------------===//
//                    CastInst SubClass Constructors
//===----------------------------------------------------------------------===//

/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
      SrcTy->isAggregateType() || DstTy->isAggregateType())
    return false;

  // Get the size of the types in bits, and whether we are dealing
  // with vector types, we'll need this later.
  bool SrcIsVec = isa<VectorType>(SrcTy);
  bool DstIsVec = isa<VectorType>(DstTy);
  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();

  // If these are vector types, get the lengths of the vectors (using zero for
  // scalar types means that checking that vector lengths match also checks
  // that scalars are not being converted to vectors or vectors to scalars).
  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
                                : ElementCount::getFixed(0);
  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
                                : ElementCount::getFixed(0);

  // Switch on the opcode provided
  switch (op) {
  default: return false; // This is an input error
  case Instruction::Trunc:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::ZExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::SExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::FPTrunc:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::FPExt:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC;
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC;
  case Instruction::PtrToInt:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  case Instruction::IntToPtr:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  case Instruction::BitCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());

    // BitCast implies a no-op cast of type only. No bits change.
    // However, you can't cast pointers to anything but pointers.
    if (!SrcPtrTy != !DstPtrTy)
      return false;

    // For non-pointer cases, the cast is okay if the source and destination
    // bit widths are identical.
    if (!SrcPtrTy)
      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();

    // If both are pointers then the address spaces must match.
    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
      return false;

    // A vector of pointers must have the same number of elements.
    if (SrcIsVec && DstIsVec)
      return SrcEC == DstEC;
    if (SrcIsVec)
      return SrcEC == ElementCount::getFixed(1);
    if (DstIsVec)
      return DstEC == ElementCount::getFixed(1);

    return true;
  }
  case Instruction::AddrSpaceCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    if (!SrcPtrTy)
      return false;
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
    if (!DstPtrTy)
      return false;

    if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
      return false;

    return SrcEC == DstEC;
  }
  }
}
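
// For example (a sketch): castIsValid(Instruction::Trunc, <i64 value>, i32)
// holds because both sides are integers and 64 > 32, whereas "truncating"
// i32 to i64, or mixing a vector source with a scalar destination, is
// rejected by the element-count and bit-size checks above.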

TruncInst::TruncInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}

TruncInst::TruncInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}

ZExtInst::ZExtInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}

ZExtInst::ZExtInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}

SExtInst::SExtInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, SExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}

SExtInst::SExtInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}

FPTruncInst::FPTruncInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}

FPTruncInst::FPTruncInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}

FPExtInst::FPExtInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}

FPExtInst::FPExtInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}

UIToFPInst::UIToFPInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}

UIToFPInst::UIToFPInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}

SIToFPInst::SIToFPInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}

SIToFPInst::SIToFPInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}

FPToUIInst::FPToUIInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}

FPToUIInst::FPToUIInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}

FPToSIInst::FPToSIInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}

FPToSIInst::FPToSIInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}

PtrToIntInst::PtrToIntInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}

PtrToIntInst::PtrToIntInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}

IntToPtrInst::IntToPtrInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}

IntToPtrInst::IntToPtrInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}

BitCastInst::BitCastInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}

BitCastInst::BitCastInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}

AddrSpaceCastInst::AddrSpaceCastInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
}

AddrSpaceCastInst::AddrSpaceCastInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
}

//===----------------------------------------------------------------------===//
//                               CmpInst Classes
//===----------------------------------------------------------------------===//

CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, Instruction *InsertBefore,
                 Instruction *FlagsSource)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertBefore) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
  if (FlagsSource)
    copyIRFlags(FlagsSource);
}

CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertAtEnd) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
}

CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                const Twine &Name, Instruction *InsertBefore) {
  if (Op == Instruction::ICmp) {
    if (InsertBefore)
      return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
                          S1, S2, Name);
    else
      return new ICmpInst(CmpInst::Predicate(predicate),
                          S1, S2, Name);
  }

  if (InsertBefore)
    return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  else
    return new FCmpInst(CmpInst::Predicate(predicate),
                        S1, S2, Name);
}

CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                const Twine &Name, BasicBlock *InsertAtEnd) {
  if (Op == Instruction::ICmp) {
    return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  }
  return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
                      S1, S2, Name);
}

void CmpInst::swapOperands() {
  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
    IC->swapOperands();
  else
    cast<FCmpInst>(this)->swapOperands();
}

bool CmpInst::isCommutative() const {
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isCommutative();
  return cast<FCmpInst>(this)->isCommutative();
}

bool CmpInst::isEquality(Predicate P) {
  if (ICmpInst::isIntPredicate(P))
    return ICmpInst::isEquality(P);
  if (FCmpInst::isFPPredicate(P))
    return FCmpInst::isEquality(P);
  llvm_unreachable("Unsupported predicate kind");
}

CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown cmp predicate!");
  case ICMP_EQ: return ICMP_NE;
  case ICMP_NE: return ICMP_EQ;
  case ICMP_UGT: return ICMP_ULE;
  case ICMP_ULT: return ICMP_UGE;
  case ICMP_UGE: return ICMP_ULT;
  case ICMP_ULE: return ICMP_UGT;
  case ICMP_SGT: return ICMP_SLE;
  case ICMP_SLT: return ICMP_SGE;
  case ICMP_SGE: return ICMP_SLT;
  case ICMP_SLE: return ICMP_SGT;

  case FCMP_OEQ: return FCMP_UNE;
  case FCMP_ONE: return FCMP_UEQ;
  case FCMP_OGT: return FCMP_ULE;
  case FCMP_OLT: return FCMP_UGE;
  case FCMP_OGE: return FCMP_ULT;
  case FCMP_OLE: return FCMP_UGT;
  case FCMP_UEQ: return FCMP_ONE;
  case FCMP_UNE: return FCMP_OEQ;
  case FCMP_UGT: return FCMP_OLE;
  case FCMP_ULT: return FCMP_OGE;
  case FCMP_UGE: return FCMP_OLT;
  case FCMP_ULE: return FCMP_OGT;
  case FCMP_ORD: return FCMP_UNO;
  case FCMP_UNO: return FCMP_ORD;
  case FCMP_TRUE: return FCMP_FALSE;
  case FCMP_FALSE: return FCMP_TRUE;
  }
}
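
// Note the inverse is a logical negation, not an operand swap: for floats,
// not(oeq) is une because NaN compares unordered. For example (a sketch):
// \code
//   CmpInst::getInversePredicate(CmpInst::FCMP_OLT) == CmpInst::FCMP_UGE
// \endcode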

StringRef CmpInst::getPredicateName(Predicate Pred) {
  switch (Pred) {
  default: return "unknown";
  case FCmpInst::FCMP_FALSE: return "false";
  case FCmpInst::FCMP_OEQ: return "oeq";
  case FCmpInst::FCMP_OGT: return "ogt";
  case FCmpInst::FCMP_OGE: return "oge";
  case FCmpInst::FCMP_OLT: return "olt";
  case FCmpInst::FCMP_OLE: return "ole";
  case FCmpInst::FCMP_ONE: return "one";
  case FCmpInst::FCMP_ORD: return "ord";
  case FCmpInst::FCMP_UNO: return "uno";
  case FCmpInst::FCMP_UEQ: return "ueq";
  case FCmpInst::FCMP_UGT: return "ugt";
  case FCmpInst::FCMP_UGE: return "uge";
  case FCmpInst::FCMP_ULT: return "ult";
  case FCmpInst::FCMP_ULE: return "ule";
  case FCmpInst::FCMP_UNE: return "une";
  case FCmpInst::FCMP_TRUE: return "true";
  case ICmpInst::ICMP_EQ: return "eq";
  case ICmpInst::ICMP_NE: return "ne";
  case ICmpInst::ICMP_SGT: return "sgt";
  case ICmpInst::ICMP_SGE: return "sge";
  case ICmpInst::ICMP_SLT: return "slt";
  case ICmpInst::ICMP_SLE: return "sle";
  case ICmpInst::ICMP_UGT: return "ugt";
  case ICmpInst::ICMP_UGE: return "uge";
  case ICmpInst::ICMP_ULT: return "ult";
  case ICmpInst::ICMP_ULE: return "ule";
  }
}

raw_ostream &llvm::operator<<(raw_ostream &OS, CmpInst::Predicate Pred) {
  OS << CmpInst::getPredicateName(Pred);
  return OS;
}

ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown icmp predicate!");
  case ICMP_EQ: case ICMP_NE:
  case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
    return pred;
  case ICMP_UGT: return ICMP_SGT;
  case ICMP_ULT: return ICMP_SLT;
  case ICMP_UGE: return ICMP_SGE;
  case ICMP_ULE: return ICMP_SLE;
  }
}

ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown icmp predicate!");
  case ICMP_EQ: case ICMP_NE:
  case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
    return pred;
  case ICMP_SGT: return ICMP_UGT;
  case ICMP_SLT: return ICMP_ULT;
  case ICMP_SGE: return ICMP_UGE;
  case ICMP_SLE: return ICMP_ULE;
  }
}

CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown cmp predicate!");
  case ICMP_EQ: case ICMP_NE:
    return pred;
  case ICMP_SGT: return ICMP_SLT;
  case ICMP_SLT: return ICMP_SGT;
  case ICMP_SGE: return ICMP_SLE;
  case ICMP_SLE: return ICMP_SGE;
  case ICMP_UGT: return ICMP_ULT;
  case ICMP_ULT: return ICMP_UGT;
  case ICMP_UGE: return ICMP_ULE;
  case ICMP_ULE: return ICMP_UGE;

  case FCMP_FALSE: case FCMP_TRUE:
  case FCMP_OEQ: case FCMP_ONE:
  case FCMP_UEQ: case FCMP_UNE:
  case FCMP_ORD: case FCMP_UNO:
    return pred;
  case FCMP_OGT: return FCMP_OLT;
  case FCMP_OLT: return FCMP_OGT;
  case FCMP_OGE: return FCMP_OLE;
  case FCMP_OLE: return FCMP_OGE;
  case FCMP_UGT: return FCMP_ULT;
  case FCMP_ULT: return FCMP_UGT;
  case FCMP_UGE: return FCMP_ULE;
  case FCMP_ULE: return FCMP_UGE;
  }
}
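
// getSwappedPredicate mirrors the operands rather than negating the result:
// (x pred y) is equivalent to (y swapped(pred) x). For example (a sketch),
// swapped(ICMP_SGT) is ICMP_SLT, whereas the *inverse* of ICMP_SGT is
// ICMP_SLE.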

bool CmpInst::isNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGE:
  case ICMP_SLE:
  case ICMP_UGE:
  case ICMP_ULE:
  case FCMP_OGE:
  case FCMP_OLE:
  case FCMP_UGE:
  case FCMP_ULE:
    return true;
  default:
    return false;
  }
}

bool CmpInst::isStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT:
  case ICMP_SLT:
  case ICMP_UGT:
  case ICMP_ULT:
  case FCMP_OGT:
  case FCMP_OLT:
  case FCMP_UGT:
  case FCMP_ULT:
    return true;
  default:
    return false;
  }
}

CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGE: return ICMP_SGT;
  case ICMP_SLE: return ICMP_SLT;
  case ICMP_UGE: return ICMP_UGT;
  case ICMP_ULE: return ICMP_ULT;
  case FCMP_OGE: return FCMP_OGT;
  case FCMP_OLE: return FCMP_OLT;
  case FCMP_UGE: return FCMP_UGT;
  case FCMP_ULE: return FCMP_ULT;
  default: return pred;
  }
}

CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT: return ICMP_SGE;
  case ICMP_SLT: return ICMP_SLE;
  case ICMP_UGT: return ICMP_UGE;
  case ICMP_ULT: return ICMP_ULE;
  case FCMP_OGT: return FCMP_OGE;
  case FCMP_OLT: return FCMP_OLE;
  case FCMP_UGT: return FCMP_UGE;
  case FCMP_ULT: return FCMP_ULE;
  default: return pred;
  }
}

CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
  assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");

  if (isStrictPredicate(pred))
    return getNonStrictPredicate(pred);
  if (isNonStrictPredicate(pred))
    return getStrictPredicate(pred);

  llvm_unreachable("Unknown predicate!");
}

CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_ULT:
    return CmpInst::ICMP_SLT;
  case CmpInst::ICMP_ULE:
    return CmpInst::ICMP_SLE;
  case CmpInst::ICMP_UGT:
    return CmpInst::ICMP_SGT;
  case CmpInst::ICMP_UGE:
    return CmpInst::ICMP_SGE;
  }
}

CmpInst::Predicate CmpInst::getUnsignedPredicate(Predicate pred) {
  assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_SLT:
    return CmpInst::ICMP_ULT;
  case CmpInst::ICMP_SLE:
    return CmpInst::ICMP_ULE;
  case CmpInst::ICMP_SGT:
    return CmpInst::ICMP_UGT;
  case CmpInst::ICMP_SGE:
    return CmpInst::ICMP_UGE;
  }
}

bool CmpInst::isUnsigned(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE: return true;
  }
}

bool CmpInst::isSigned(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE: return true;
  }
}

bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
                       ICmpInst::Predicate Pred) {
  assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
  switch (Pred) {
  case ICmpInst::Predicate::ICMP_EQ:
    return LHS.eq(RHS);
  case ICmpInst::Predicate::ICMP_NE:
    return LHS.ne(RHS);
  case ICmpInst::Predicate::ICMP_UGT:
    return LHS.ugt(RHS);
  case ICmpInst::Predicate::ICMP_UGE:
    return LHS.uge(RHS);
  case ICmpInst::Predicate::ICMP_ULT:
    return LHS.ult(RHS);
  case ICmpInst::Predicate::ICMP_ULE:
    return LHS.ule(RHS);
  case ICmpInst::Predicate::ICMP_SGT:
    return LHS.sgt(RHS);
  case ICmpInst::Predicate::ICMP_SGE:
    return LHS.sge(RHS);
  case ICmpInst::Predicate::ICMP_SLT:
    return LHS.slt(RHS);
  case ICmpInst::Predicate::ICMP_SLE:
    return LHS.sle(RHS);
  }
  llvm_unreachable("Unexpected non-integer predicate.");
}
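
// Example (a sketch): signed and unsigned predicates disagree once the sign
// bit is set.
// \code
//   APInt A(8, 0x80), B(8, 1);
//   ICmpInst::compare(A, B, ICmpInst::ICMP_UGT); // true  (128 >u 1)
//   ICmpInst::compare(A, B, ICmpInst::ICMP_SGT); // false (-128 <s 1)
// \endcode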

bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
                       FCmpInst::Predicate Pred) {
  APFloat::cmpResult R = LHS.compare(RHS);
  switch (Pred) {
  default:
    llvm_unreachable("Invalid FCmp Predicate");
  case FCmpInst::FCMP_FALSE:
    return false;
  case FCmpInst::FCMP_TRUE:
    return true;
  case FCmpInst::FCMP_UNO:
    return R == APFloat::cmpUnordered;
  case FCmpInst::FCMP_ORD:
    return R != APFloat::cmpUnordered;
  case FCmpInst::FCMP_UEQ:
    return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_OEQ:
    return R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UNE:
    return R != APFloat::cmpEqual;
  case FCmpInst::FCMP_ONE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_OLT:
    return R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_UGT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OGT:
    return R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULE:
    return R != APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OLE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UGE:
    return R != APFloat::cmpLessThan;
  case FCmpInst::FCMP_OGE:
    return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
  }
}
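
// Example (a sketch): NaN makes every ordered predicate false and every
// unordered predicate true.
// \code
//   APFloat NaN = APFloat::getNaN(APFloat::IEEEsingle());
//   APFloat One(1.0f);
//   FCmpInst::compare(NaN, One, FCmpInst::FCMP_OLT); // false
//   FCmpInst::compare(NaN, One, FCmpInst::FCMP_ULT); // true
// \endcode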

CmpInst::Predicate CmpInst::getFlippedSignednessPredicate(Predicate pred) {
  assert(CmpInst::isRelational(pred) &&
         "Call only with non-equality predicates!");

  if (isSigned(pred))
    return getUnsignedPredicate(pred);
  if (isUnsigned(pred))
    return getSignedPredicate(pred);

  llvm_unreachable("Unknown predicate!");
}

bool CmpInst::isOrdered(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
  case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
  case FCmpInst::FCMP_ORD: return true;
  }
}

bool CmpInst::isUnordered(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
  case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
  case FCmpInst::FCMP_UNO: return true;
  }
}

bool CmpInst::isTrueWhenEqual(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
  case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
  }
}

bool CmpInst::isFalseWhenEqual(Predicate predicate) {
  switch (predicate) {
  case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
  default: return false;
  }
}

bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  // If the predicates match, then we know the first condition implies the
  // second is true.
  if (Pred1 == Pred2)
    return true;

  switch (Pred1) {
  default:
    break;
  case ICMP_EQ:
    // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
    return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
           Pred2 == ICMP_SLE;
  case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
  case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
  case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
  case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
  }
  return false;
}

bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
}

//===----------------------------------------------------------------------===//
//                        SwitchInst Implementation
//===----------------------------------------------------------------------===//

void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
  assert(Value && Default && NumReserved);
  ReservedSpace = NumReserved;
  setNumHungOffUseOperands(2);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Value;
  Op<1>() = Default;
}

/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertBefore) {
  init(Value, Default, 2+NumCases*2);
}

/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor also autoinserts at the end of the specified BasicBlock.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertAtEnd) {
  init(Value, Default, 2+NumCases*2);
}

SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = SI.getOperandList();
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
    OL[i] = InOL[i];
    OL[i+1] = InOL[i+1];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}

/// addCase - Add an entry to the switch instruction.
///
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
  unsigned NewCaseIdx = getNumCases();
  unsigned OpNo = getNumOperands();
  if (OpNo+2 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}
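
// Operand layout sketch for SwitchInst: operands 0 and 1 hold the condition
// and the default destination; each case then occupies two further hung-off
// slots (value, successor), so case i lives at operands 2+2*i and 3+2*i.
// This is why init() reserves 2+NumCases*2 operands above.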

/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  return CaseIt(this, idx);
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 3 times.
///
void SwitchInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*3;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
  assert(Changed && "called only if metadata has changed");

  if (!Weights)
    return nullptr;

  assert(SI.getNumSuccessors() == Weights->size() &&
         "num of prof branch_weights must accord with num of successors");

  bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });

  if (AllZeroes || Weights->size() < 2)
    return nullptr;

  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
}

void SwitchInstProfUpdateWrapper::init() {
  MDNode *ProfileData = getBranchWeightMDNode(SI);
  if (!ProfileData)
    return;

  if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
    llvm_unreachable("number of prof branch_weights metadata operands does "
                     "not correspond to number of successors");
  }

  SmallVector<uint32_t, 8> Weights;
  if (!extractBranchWeights(ProfileData, Weights))
    return;
  this->Weights = std::move(Weights);
}

SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with the way SwitchInst::removeCase() removes
    // the cases in SwitchInst::removeCase(CaseIt).
    (*Weights)[I->getCaseIndex() + 1] = Weights->back();
    Weights->pop_back();
  }
  return SI.removeCase(I);
}

void SwitchInstProfUpdateWrapper::addCase(
    ConstantInt *OnVal, BasicBlock *Dest,
    SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  SI.addCase(OnVal, Dest);

  if (!Weights && W && *W) {
    Changed = true;
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
    (*Weights)[SI.getNumSuccessors() - 1] = *W;
  } else if (Weights) {
    Changed = true;
    Weights->push_back(W.value_or(0));
  }
  if (Weights)
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
}

Instruction::InstListType::iterator
SwitchInstProfUpdateWrapper::eraseFromParent() {
  // Instruction is erased. Mark as unchanged to not touch it in the destructor.
  Changed = false;
  if (Weights)
    Weights->resize(0);
  return SI.eraseFromParent();
}

SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
  if (!Weights)
    return std::nullopt;
  return (*Weights)[idx];
}

void SwitchInstProfUpdateWrapper::setSuccessorWeight(
    unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  if (!W)
    return;

  if (!Weights && *W)
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);

  if (Weights) {
    auto &OldW = (*Weights)[idx];
    if (*W != OldW) {
      Changed = true;
      OldW = *W;
    }
  }
}

SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
                                                unsigned idx) {
  if (MDNode *ProfileData = getBranchWeightMDNode(SI))
    if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
      return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
          ->getValue()
          .getZExtValue();

  return std::nullopt;
}

//===----------------------------------------------------------------------===//
//                        IndirectBrInst Implementation
//===----------------------------------------------------------------------===//

void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  ReservedSpace = 1+NumDests;
  setNumHungOffUseOperands(1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Address;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 2 times.
///
void IndirectBrInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*2;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
    : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
                  nullptr, IBI.getNumOperands()) {
  allocHungoffUses(IBI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = IBI.getOperandList();
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
    OL[i] = InOL[i];
  SubclassOptionalData = IBI.SubclassOptionalData;
}

/// addDestination - Add a destination.
///
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
  unsigned OpNo = getNumOperands();
  if (OpNo+1 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+1);
  getOperandList()[OpNo] = DestBB;
}

/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.
void IndirectBrInst::removeDestination(unsigned idx) {
  assert(idx < getNumOperands()-1 && "Successor index out of range!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Replace this value with the last one.
  OL[idx+1] = OL[NumOps-1];

  // Nuke the last value.
  OL[NumOps-1].set(nullptr);
  setNumHungOffUseOperands(NumOps-1);
}

//===----------------------------------------------------------------------===//
//                           FreezeInst Implementation
//===----------------------------------------------------------------------===//

FreezeInst::FreezeInst(Value *S,
                       const Twine &Name, Instruction *InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}

FreezeInst::FreezeInst(Value *S,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
  setName(Name);
}

//===----------------------------------------------------------------------===//
//                        cloneImpl() implementations
//===----------------------------------------------------------------------===//

// Define these methods here so vtables don't get emitted into every
// translation unit that uses these classes.

GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  return new (getNumOperands()) GetElementPtrInst(*this);
}

UnaryOperator *UnaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>());
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>(), Op<1>());
}

FCmpInst *FCmpInst::cloneImpl() const {
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}

AllocaInst *AllocaInst::cloneImpl() const {
  AllocaInst *Result = new AllocaInst(getAllocatedType(), getAddressSpace(),
                                      getOperand(0), getAlign());
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
  Result->setSwiftError(isSwiftError());
  return Result;
}

LoadInst *LoadInst::cloneImpl() const {
  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
                      getAlign(), getOrdering(), getSyncScopeID());
}

StoreInst *StoreInst::cloneImpl() const {
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
                       getOrdering(), getSyncScopeID());
}

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
      getOperand(0), getOperand(1), getOperand(2), getAlign(),
      getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}

AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  AtomicRMWInst *Result =
      new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                        getAlign(), getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}

CallInst *CallInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) CallInst(*this);
  }
  return new (getNumOperands()) CallInst(*this);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());
}

PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

ReturnInst *ReturnInst::cloneImpl() const {
  return new (getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new (getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}

InvokeInst *InvokeInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) InvokeInst(*this);
  }
  return new (getNumOperands()) InvokeInst(*this);
}

CallBrInst *CallBrInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
  }
  return new (getNumOperands()) CallBrInst(*this);
}

ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (getNumOperands()) CleanupReturnInst(*this);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (getNumOperands()) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}

FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (getNumOperands()) FuncletPadInst(*this);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}

FreezeInst *FreezeInst::cloneImpl() const {
  return new FreezeInst(getOperand(0));
}