// Source: llvm-project.git / llvm/lib/IR/Instruction.cpp
// (extracted from a web blob view, id 1b3c03348f41a70a88bdb3166efaf47020192380;
// the unrelated commit title shown by the viewer was:
// "Run DCE after a LoopFlatten test to reduce spurious output [nfc]")
//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/Instruction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"

using namespace llvm;
25 Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
26 Instruction *InsertBefore)
27 : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
29 // If requested, insert this instruction into a basic block...
30 if (InsertBefore) {
31 BasicBlock *BB = InsertBefore->getParent();
32 assert(BB && "Instruction to insert before is not in a basic block!");
33 insertInto(BB, InsertBefore->getIterator());
37 Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
38 BasicBlock *InsertAtEnd)
39 : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
41 // append this instruction into the basic block
42 assert(InsertAtEnd && "Basic block to append to may not be NULL!");
43 insertInto(InsertAtEnd, InsertAtEnd->end());
46 Instruction::~Instruction() {
47 assert(!Parent && "Instruction still linked in the program!");
49 // Replace any extant metadata uses of this instruction with undef to
50 // preserve debug info accuracy. Some alternatives include:
51 // - Treat Instruction like any other Value, and point its extant metadata
52 // uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
53 // trivially dead (i.e. fair game for deletion in many passes), leading to
54 // stale dbg.values being in effect for too long.
55 // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
56 // correct. OTOH results in wasted work in some common cases (e.g. when all
57 // instructions in a BasicBlock are deleted).
58 if (isUsedByMetadata())
59 ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
61 // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s)
62 // mapping in LLVMContext.
63 setMetadata(LLVMContext::MD_DIAssignID, nullptr);
67 void Instruction::setParent(BasicBlock *P) {
68 Parent = P;
71 const Module *Instruction::getModule() const {
72 return getParent()->getModule();
75 const Function *Instruction::getFunction() const {
76 return getParent()->getParent();
79 void Instruction::removeFromParent() {
80 getParent()->getInstList().remove(getIterator());
83 BasicBlock::iterator Instruction::eraseFromParent() {
84 return getParent()->getInstList().erase(getIterator());
87 /// Insert an unlinked instruction into a basic block immediately before the
88 /// specified instruction.
89 void Instruction::insertBefore(Instruction *InsertPos) {
90 insertInto(InsertPos->getParent(), InsertPos->getIterator());
93 /// Insert an unlinked instruction into a basic block immediately after the
94 /// specified instruction.
95 void Instruction::insertAfter(Instruction *InsertPos) {
96 insertInto(InsertPos->getParent(), std::next(InsertPos->getIterator()));
99 BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB,
100 BasicBlock::iterator It) {
101 assert(getParent() == nullptr && "Expected detached instruction");
102 assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
103 "It not in ParentBB");
104 return ParentBB->getInstList().insert(It, this);
107 /// Unlink this instruction from its current basic block and insert it into the
108 /// basic block that MovePos lives in, right before MovePos.
109 void Instruction::moveBefore(Instruction *MovePos) {
110 moveBefore(*MovePos->getParent(), MovePos->getIterator());
113 void Instruction::moveAfter(Instruction *MovePos) {
114 moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
117 void Instruction::moveBefore(BasicBlock &BB, InstListType::iterator I) {
118 assert(I == BB.end() || I->getParent() == &BB);
119 BB.splice(I, getParent(), getIterator());
122 bool Instruction::comesBefore(const Instruction *Other) const {
123 assert(Parent && Other->Parent &&
124 "instructions without BB parents have no order");
125 assert(Parent == Other->Parent && "cross-BB instruction order comparison");
126 if (!Parent->isInstrOrderValid())
127 Parent->renumberInstructions();
128 return Order < Other->Order;
131 Instruction *Instruction::getInsertionPointAfterDef() {
132 assert(!getType()->isVoidTy() && "Instruction must define result");
133 BasicBlock *InsertBB;
134 BasicBlock::iterator InsertPt;
135 if (auto *PN = dyn_cast<PHINode>(this)) {
136 InsertBB = PN->getParent();
137 InsertPt = InsertBB->getFirstInsertionPt();
138 } else if (auto *II = dyn_cast<InvokeInst>(this)) {
139 InsertBB = II->getNormalDest();
140 InsertPt = InsertBB->getFirstInsertionPt();
141 } else if (isa<CallBrInst>(this)) {
142 // Def is available in multiple successors, there's no single dominating
143 // insertion point.
144 return nullptr;
145 } else {
146 assert(!isTerminator() && "Only invoke/callbr terminators return value");
147 InsertBB = getParent();
148 InsertPt = std::next(getIterator());
151 // catchswitch blocks don't have any legal insertion point (because they
152 // are both an exception pad and a terminator).
153 if (InsertPt == InsertBB->end())
154 return nullptr;
155 return &*InsertPt;
158 bool Instruction::isOnlyUserOfAnyOperand() {
159 return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
162 void Instruction::setHasNoUnsignedWrap(bool b) {
163 cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
166 void Instruction::setHasNoSignedWrap(bool b) {
167 cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
170 void Instruction::setIsExact(bool b) {
171 cast<PossiblyExactOperator>(this)->setIsExact(b);
174 void Instruction::setNonNeg(bool b) {
175 assert(isa<PossiblyNonNegInst>(this) && "Must be zext");
176 SubclassOptionalData = (SubclassOptionalData & ~PossiblyNonNegInst::NonNeg) |
177 (b * PossiblyNonNegInst::NonNeg);
180 bool Instruction::hasNoUnsignedWrap() const {
181 return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
184 bool Instruction::hasNoSignedWrap() const {
185 return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
188 bool Instruction::hasNonNeg() const {
189 assert(isa<PossiblyNonNegInst>(this) && "Must be zext");
190 return (SubclassOptionalData & PossiblyNonNegInst::NonNeg) != 0;
193 bool Instruction::hasPoisonGeneratingFlags() const {
194 return cast<Operator>(this)->hasPoisonGeneratingFlags();
197 void Instruction::dropPoisonGeneratingFlags() {
198 switch (getOpcode()) {
199 case Instruction::Add:
200 case Instruction::Sub:
201 case Instruction::Mul:
202 case Instruction::Shl:
203 cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
204 cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
205 break;
207 case Instruction::UDiv:
208 case Instruction::SDiv:
209 case Instruction::AShr:
210 case Instruction::LShr:
211 cast<PossiblyExactOperator>(this)->setIsExact(false);
212 break;
214 case Instruction::GetElementPtr:
215 cast<GetElementPtrInst>(this)->setIsInBounds(false);
216 break;
218 case Instruction::ZExt:
219 setNonNeg(false);
220 break;
223 if (isa<FPMathOperator>(this)) {
224 setHasNoNaNs(false);
225 setHasNoInfs(false);
228 assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
231 bool Instruction::hasPoisonGeneratingMetadata() const {
232 return hasMetadata(LLVMContext::MD_range) ||
233 hasMetadata(LLVMContext::MD_nonnull) ||
234 hasMetadata(LLVMContext::MD_align);
237 void Instruction::dropPoisonGeneratingMetadata() {
238 eraseMetadata(LLVMContext::MD_range);
239 eraseMetadata(LLVMContext::MD_nonnull);
240 eraseMetadata(LLVMContext::MD_align);
243 void Instruction::dropUBImplyingAttrsAndUnknownMetadata(
244 ArrayRef<unsigned> KnownIDs) {
245 dropUnknownNonDebugMetadata(KnownIDs);
246 auto *CB = dyn_cast<CallBase>(this);
247 if (!CB)
248 return;
249 // For call instructions, we also need to drop parameter and return attributes
250 // that are can cause UB if the call is moved to a location where the
251 // attribute is not valid.
252 AttributeList AL = CB->getAttributes();
253 if (AL.isEmpty())
254 return;
255 AttributeMask UBImplyingAttributes =
256 AttributeFuncs::getUBImplyingAttributes();
257 for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
258 CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
259 CB->removeRetAttrs(UBImplyingAttributes);
262 void Instruction::dropUBImplyingAttrsAndMetadata() {
263 // !annotation metadata does not impact semantics.
264 // !range, !nonnull and !align produce poison, so they are safe to speculate.
265 // !noundef and various AA metadata must be dropped, as it generally produces
266 // immediate undefined behavior.
267 unsigned KnownIDs[] = {LLVMContext::MD_annotation, LLVMContext::MD_range,
268 LLVMContext::MD_nonnull, LLVMContext::MD_align};
269 dropUBImplyingAttrsAndUnknownMetadata(KnownIDs);
272 bool Instruction::isExact() const {
273 return cast<PossiblyExactOperator>(this)->isExact();
276 void Instruction::setFast(bool B) {
277 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
278 cast<FPMathOperator>(this)->setFast(B);
281 void Instruction::setHasAllowReassoc(bool B) {
282 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
283 cast<FPMathOperator>(this)->setHasAllowReassoc(B);
286 void Instruction::setHasNoNaNs(bool B) {
287 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
288 cast<FPMathOperator>(this)->setHasNoNaNs(B);
291 void Instruction::setHasNoInfs(bool B) {
292 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
293 cast<FPMathOperator>(this)->setHasNoInfs(B);
296 void Instruction::setHasNoSignedZeros(bool B) {
297 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
298 cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
301 void Instruction::setHasAllowReciprocal(bool B) {
302 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
303 cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
306 void Instruction::setHasAllowContract(bool B) {
307 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
308 cast<FPMathOperator>(this)->setHasAllowContract(B);
311 void Instruction::setHasApproxFunc(bool B) {
312 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
313 cast<FPMathOperator>(this)->setHasApproxFunc(B);
316 void Instruction::setFastMathFlags(FastMathFlags FMF) {
317 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
318 cast<FPMathOperator>(this)->setFastMathFlags(FMF);
321 void Instruction::copyFastMathFlags(FastMathFlags FMF) {
322 assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
323 cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
326 bool Instruction::isFast() const {
327 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
328 return cast<FPMathOperator>(this)->isFast();
331 bool Instruction::hasAllowReassoc() const {
332 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
333 return cast<FPMathOperator>(this)->hasAllowReassoc();
336 bool Instruction::hasNoNaNs() const {
337 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
338 return cast<FPMathOperator>(this)->hasNoNaNs();
341 bool Instruction::hasNoInfs() const {
342 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
343 return cast<FPMathOperator>(this)->hasNoInfs();
346 bool Instruction::hasNoSignedZeros() const {
347 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
348 return cast<FPMathOperator>(this)->hasNoSignedZeros();
351 bool Instruction::hasAllowReciprocal() const {
352 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
353 return cast<FPMathOperator>(this)->hasAllowReciprocal();
356 bool Instruction::hasAllowContract() const {
357 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
358 return cast<FPMathOperator>(this)->hasAllowContract();
361 bool Instruction::hasApproxFunc() const {
362 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
363 return cast<FPMathOperator>(this)->hasApproxFunc();
366 FastMathFlags Instruction::getFastMathFlags() const {
367 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
368 return cast<FPMathOperator>(this)->getFastMathFlags();
371 void Instruction::copyFastMathFlags(const Instruction *I) {
372 copyFastMathFlags(I->getFastMathFlags());
375 void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
376 // Copy the wrapping flags.
377 if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
378 if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
379 setHasNoSignedWrap(OB->hasNoSignedWrap());
380 setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
384 // Copy the exact flag.
385 if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
386 if (isa<PossiblyExactOperator>(this))
387 setIsExact(PE->isExact());
389 // Copy the fast-math flags.
390 if (auto *FP = dyn_cast<FPMathOperator>(V))
391 if (isa<FPMathOperator>(this))
392 copyFastMathFlags(FP->getFastMathFlags());
394 if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
395 if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
396 DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
398 if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
399 if (isa<PossiblyNonNegInst>(this))
400 setNonNeg(NNI->hasNonNeg());
403 void Instruction::andIRFlags(const Value *V) {
404 if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
405 if (isa<OverflowingBinaryOperator>(this)) {
406 setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
407 setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
411 if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
412 if (isa<PossiblyExactOperator>(this))
413 setIsExact(isExact() && PE->isExact());
415 if (auto *FP = dyn_cast<FPMathOperator>(V)) {
416 if (isa<FPMathOperator>(this)) {
417 FastMathFlags FM = getFastMathFlags();
418 FM &= FP->getFastMathFlags();
419 copyFastMathFlags(FM);
423 if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
424 if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
425 DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
427 if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
428 if (isa<PossiblyNonNegInst>(this))
429 setNonNeg(hasNonNeg() && NNI->hasNonNeg());
432 const char *Instruction::getOpcodeName(unsigned OpCode) {
433 switch (OpCode) {
434 // Terminators
435 case Ret: return "ret";
436 case Br: return "br";
437 case Switch: return "switch";
438 case IndirectBr: return "indirectbr";
439 case Invoke: return "invoke";
440 case Resume: return "resume";
441 case Unreachable: return "unreachable";
442 case CleanupRet: return "cleanupret";
443 case CatchRet: return "catchret";
444 case CatchPad: return "catchpad";
445 case CatchSwitch: return "catchswitch";
446 case CallBr: return "callbr";
448 // Standard unary operators...
449 case FNeg: return "fneg";
451 // Standard binary operators...
452 case Add: return "add";
453 case FAdd: return "fadd";
454 case Sub: return "sub";
455 case FSub: return "fsub";
456 case Mul: return "mul";
457 case FMul: return "fmul";
458 case UDiv: return "udiv";
459 case SDiv: return "sdiv";
460 case FDiv: return "fdiv";
461 case URem: return "urem";
462 case SRem: return "srem";
463 case FRem: return "frem";
465 // Logical operators...
466 case And: return "and";
467 case Or : return "or";
468 case Xor: return "xor";
470 // Memory instructions...
471 case Alloca: return "alloca";
472 case Load: return "load";
473 case Store: return "store";
474 case AtomicCmpXchg: return "cmpxchg";
475 case AtomicRMW: return "atomicrmw";
476 case Fence: return "fence";
477 case GetElementPtr: return "getelementptr";
479 // Convert instructions...
480 case Trunc: return "trunc";
481 case ZExt: return "zext";
482 case SExt: return "sext";
483 case FPTrunc: return "fptrunc";
484 case FPExt: return "fpext";
485 case FPToUI: return "fptoui";
486 case FPToSI: return "fptosi";
487 case UIToFP: return "uitofp";
488 case SIToFP: return "sitofp";
489 case IntToPtr: return "inttoptr";
490 case PtrToInt: return "ptrtoint";
491 case BitCast: return "bitcast";
492 case AddrSpaceCast: return "addrspacecast";
494 // Other instructions...
495 case ICmp: return "icmp";
496 case FCmp: return "fcmp";
497 case PHI: return "phi";
498 case Select: return "select";
499 case Call: return "call";
500 case Shl: return "shl";
501 case LShr: return "lshr";
502 case AShr: return "ashr";
503 case VAArg: return "va_arg";
504 case ExtractElement: return "extractelement";
505 case InsertElement: return "insertelement";
506 case ShuffleVector: return "shufflevector";
507 case ExtractValue: return "extractvalue";
508 case InsertValue: return "insertvalue";
509 case LandingPad: return "landingpad";
510 case CleanupPad: return "cleanuppad";
511 case Freeze: return "freeze";
513 default: return "<Invalid operator> ";
517 /// This must be kept in sync with FunctionComparator::cmpOperations in
518 /// lib/Transforms/IPO/MergeFunctions.cpp.
519 bool Instruction::hasSameSpecialState(const Instruction *I2,
520 bool IgnoreAlignment) const {
521 auto I1 = this;
522 assert(I1->getOpcode() == I2->getOpcode() &&
523 "Can not compare special state of different instructions");
525 if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
526 return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
527 (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
528 IgnoreAlignment);
529 if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
530 return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
531 (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
532 IgnoreAlignment) &&
533 LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
534 LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
535 if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
536 return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
537 (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
538 IgnoreAlignment) &&
539 SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
540 SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
541 if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
542 return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
543 if (const CallInst *CI = dyn_cast<CallInst>(I1))
544 return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
545 CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
546 CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
547 CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
548 if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
549 return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
550 CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
551 CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
552 if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
553 return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
554 CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
555 CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
556 if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
557 return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
558 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
559 return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
560 if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
561 return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
562 FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
563 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
564 return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
565 CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
566 CXI->getSuccessOrdering() ==
567 cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
568 CXI->getFailureOrdering() ==
569 cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
570 CXI->getSyncScopeID() ==
571 cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
572 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
573 return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
574 RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
575 RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
576 RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
577 if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
578 return SVI->getShuffleMask() ==
579 cast<ShuffleVectorInst>(I2)->getShuffleMask();
580 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
581 return GEP->getSourceElementType() ==
582 cast<GetElementPtrInst>(I2)->getSourceElementType();
584 return true;
587 bool Instruction::isIdenticalTo(const Instruction *I) const {
588 return isIdenticalToWhenDefined(I) &&
589 SubclassOptionalData == I->SubclassOptionalData;
592 bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
593 if (getOpcode() != I->getOpcode() ||
594 getNumOperands() != I->getNumOperands() ||
595 getType() != I->getType())
596 return false;
598 // If both instructions have no operands, they are identical.
599 if (getNumOperands() == 0 && I->getNumOperands() == 0)
600 return this->hasSameSpecialState(I);
602 // We have two instructions of identical opcode and #operands. Check to see
603 // if all operands are the same.
604 if (!std::equal(op_begin(), op_end(), I->op_begin()))
605 return false;
607 // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
608 if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
609 const PHINode *otherPHI = cast<PHINode>(I);
610 return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
611 otherPHI->block_begin());
614 return this->hasSameSpecialState(I);
617 // Keep this in sync with FunctionComparator::cmpOperations in
618 // lib/Transforms/IPO/MergeFunctions.cpp.
619 bool Instruction::isSameOperationAs(const Instruction *I,
620 unsigned flags) const {
621 bool IgnoreAlignment = flags & CompareIgnoringAlignment;
622 bool UseScalarTypes = flags & CompareUsingScalarTypes;
624 if (getOpcode() != I->getOpcode() ||
625 getNumOperands() != I->getNumOperands() ||
626 (UseScalarTypes ?
627 getType()->getScalarType() != I->getType()->getScalarType() :
628 getType() != I->getType()))
629 return false;
631 // We have two instructions of identical opcode and #operands. Check to see
632 // if all operands are the same type
633 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
634 if (UseScalarTypes ?
635 getOperand(i)->getType()->getScalarType() !=
636 I->getOperand(i)->getType()->getScalarType() :
637 getOperand(i)->getType() != I->getOperand(i)->getType())
638 return false;
640 return this->hasSameSpecialState(I, IgnoreAlignment);
643 bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
644 for (const Use &U : uses()) {
645 // PHI nodes uses values in the corresponding predecessor block. For other
646 // instructions, just check to see whether the parent of the use matches up.
647 const Instruction *I = cast<Instruction>(U.getUser());
648 const PHINode *PN = dyn_cast<PHINode>(I);
649 if (!PN) {
650 if (I->getParent() != BB)
651 return true;
652 continue;
655 if (PN->getIncomingBlock(U) != BB)
656 return true;
658 return false;
661 bool Instruction::mayReadFromMemory() const {
662 switch (getOpcode()) {
663 default: return false;
664 case Instruction::VAArg:
665 case Instruction::Load:
666 case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
667 case Instruction::AtomicCmpXchg:
668 case Instruction::AtomicRMW:
669 case Instruction::CatchPad:
670 case Instruction::CatchRet:
671 return true;
672 case Instruction::Call:
673 case Instruction::Invoke:
674 case Instruction::CallBr:
675 return !cast<CallBase>(this)->onlyWritesMemory();
676 case Instruction::Store:
677 return !cast<StoreInst>(this)->isUnordered();
681 bool Instruction::mayWriteToMemory() const {
682 switch (getOpcode()) {
683 default: return false;
684 case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
685 case Instruction::Store:
686 case Instruction::VAArg:
687 case Instruction::AtomicCmpXchg:
688 case Instruction::AtomicRMW:
689 case Instruction::CatchPad:
690 case Instruction::CatchRet:
691 return true;
692 case Instruction::Call:
693 case Instruction::Invoke:
694 case Instruction::CallBr:
695 return !cast<CallBase>(this)->onlyReadsMemory();
696 case Instruction::Load:
697 return !cast<LoadInst>(this)->isUnordered();
701 bool Instruction::isAtomic() const {
702 switch (getOpcode()) {
703 default:
704 return false;
705 case Instruction::AtomicCmpXchg:
706 case Instruction::AtomicRMW:
707 case Instruction::Fence:
708 return true;
709 case Instruction::Load:
710 return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
711 case Instruction::Store:
712 return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
716 bool Instruction::hasAtomicLoad() const {
717 assert(isAtomic());
718 switch (getOpcode()) {
719 default:
720 return false;
721 case Instruction::AtomicCmpXchg:
722 case Instruction::AtomicRMW:
723 case Instruction::Load:
724 return true;
728 bool Instruction::hasAtomicStore() const {
729 assert(isAtomic());
730 switch (getOpcode()) {
731 default:
732 return false;
733 case Instruction::AtomicCmpXchg:
734 case Instruction::AtomicRMW:
735 case Instruction::Store:
736 return true;
740 bool Instruction::isVolatile() const {
741 switch (getOpcode()) {
742 default:
743 return false;
744 case Instruction::AtomicRMW:
745 return cast<AtomicRMWInst>(this)->isVolatile();
746 case Instruction::Store:
747 return cast<StoreInst>(this)->isVolatile();
748 case Instruction::Load:
749 return cast<LoadInst>(this)->isVolatile();
750 case Instruction::AtomicCmpXchg:
751 return cast<AtomicCmpXchgInst>(this)->isVolatile();
752 case Instruction::Call:
753 case Instruction::Invoke:
754 // There are a very limited number of intrinsics with volatile flags.
755 if (auto *II = dyn_cast<IntrinsicInst>(this)) {
756 if (auto *MI = dyn_cast<MemIntrinsic>(II))
757 return MI->isVolatile();
758 switch (II->getIntrinsicID()) {
759 default: break;
760 case Intrinsic::matrix_column_major_load:
761 return cast<ConstantInt>(II->getArgOperand(2))->isOne();
762 case Intrinsic::matrix_column_major_store:
763 return cast<ConstantInt>(II->getArgOperand(3))->isOne();
766 return false;
770 Type *Instruction::getAccessType() const {
771 switch (getOpcode()) {
772 case Instruction::Store:
773 return cast<StoreInst>(this)->getValueOperand()->getType();
774 case Instruction::Load:
775 case Instruction::AtomicRMW:
776 return getType();
777 case Instruction::AtomicCmpXchg:
778 return cast<AtomicCmpXchgInst>(this)->getNewValOperand()->getType();
779 case Instruction::Call:
780 case Instruction::Invoke:
781 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(this)) {
782 switch (II->getIntrinsicID()) {
783 case Intrinsic::masked_load:
784 case Intrinsic::masked_gather:
785 case Intrinsic::masked_expandload:
786 case Intrinsic::vp_load:
787 case Intrinsic::vp_gather:
788 case Intrinsic::experimental_vp_strided_load:
789 return II->getType();
790 case Intrinsic::masked_store:
791 case Intrinsic::masked_scatter:
792 case Intrinsic::masked_compressstore:
793 case Intrinsic::vp_store:
794 case Intrinsic::vp_scatter:
795 case Intrinsic::experimental_vp_strided_store:
796 return II->getOperand(0)->getType();
797 default:
798 break;
803 return nullptr;
806 static bool canUnwindPastLandingPad(const LandingPadInst *LP,
807 bool IncludePhaseOneUnwind) {
808 // Because phase one unwinding skips cleanup landingpads, we effectively
809 // unwind past this frame, and callers need to have valid unwind info.
810 if (LP->isCleanup())
811 return IncludePhaseOneUnwind;
813 for (unsigned I = 0; I < LP->getNumClauses(); ++I) {
814 Constant *Clause = LP->getClause(I);
815 // catch ptr null catches all exceptions.
816 if (LP->isCatch(I) && isa<ConstantPointerNull>(Clause))
817 return false;
818 // filter [0 x ptr] catches all exceptions.
819 if (LP->isFilter(I) && Clause->getType()->getArrayNumElements() == 0)
820 return false;
823 // May catch only some subset of exceptions, in which case other exceptions
824 // will continue unwinding.
825 return true;
828 bool Instruction::mayThrow(bool IncludePhaseOneUnwind) const {
829 switch (getOpcode()) {
830 case Instruction::Call:
831 return !cast<CallInst>(this)->doesNotThrow();
832 case Instruction::CleanupRet:
833 return cast<CleanupReturnInst>(this)->unwindsToCaller();
834 case Instruction::CatchSwitch:
835 return cast<CatchSwitchInst>(this)->unwindsToCaller();
836 case Instruction::Resume:
837 return true;
838 case Instruction::Invoke: {
839 // Landingpads themselves don't unwind -- however, an invoke of a skipped
840 // landingpad may continue unwinding.
841 BasicBlock *UnwindDest = cast<InvokeInst>(this)->getUnwindDest();
842 Instruction *Pad = UnwindDest->getFirstNonPHI();
843 if (auto *LP = dyn_cast<LandingPadInst>(Pad))
844 return canUnwindPastLandingPad(LP, IncludePhaseOneUnwind);
845 return false;
847 case Instruction::CleanupPad:
848 // Treat the same as cleanup landingpad.
849 return IncludePhaseOneUnwind;
850 default:
851 return false;
855 bool Instruction::mayHaveSideEffects() const {
856 return mayWriteToMemory() || mayThrow() || !willReturn();
859 bool Instruction::isSafeToRemove() const {
860 return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
861 !this->isTerminator() && !this->isEHPad();
864 bool Instruction::willReturn() const {
865 // Volatile store isn't guaranteed to return; see LangRef.
866 if (auto *SI = dyn_cast<StoreInst>(this))
867 return !SI->isVolatile();
869 if (const auto *CB = dyn_cast<CallBase>(this))
870 return CB->hasFnAttr(Attribute::WillReturn);
871 return true;
874 bool Instruction::isLifetimeStartOrEnd() const {
875 auto *II = dyn_cast<IntrinsicInst>(this);
876 if (!II)
877 return false;
878 Intrinsic::ID ID = II->getIntrinsicID();
879 return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
882 bool Instruction::isLaunderOrStripInvariantGroup() const {
883 auto *II = dyn_cast<IntrinsicInst>(this);
884 if (!II)
885 return false;
886 Intrinsic::ID ID = II->getIntrinsicID();
887 return ID == Intrinsic::launder_invariant_group ||
888 ID == Intrinsic::strip_invariant_group;
891 bool Instruction::isDebugOrPseudoInst() const {
892 return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
895 const Instruction *
896 Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
897 for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
898 if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
899 return I;
900 return nullptr;
903 const Instruction *
904 Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
905 for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
906 if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
907 return I;
908 return nullptr;
911 const DebugLoc &Instruction::getStableDebugLoc() const {
912 if (isa<DbgInfoIntrinsic>(this))
913 if (const Instruction *Next = getNextNonDebugInstruction())
914 return Next->getDebugLoc();
915 return getDebugLoc();
918 bool Instruction::isAssociative() const {
919 unsigned Opcode = getOpcode();
920 if (isAssociative(Opcode))
921 return true;
923 switch (Opcode) {
924 case FMul:
925 case FAdd:
926 return cast<FPMathOperator>(this)->hasAllowReassoc() &&
927 cast<FPMathOperator>(this)->hasNoSignedZeros();
928 default:
929 return false;
933 bool Instruction::isCommutative() const {
934 if (auto *II = dyn_cast<IntrinsicInst>(this))
935 return II->isCommutative();
936 // TODO: Should allow icmp/fcmp?
937 return isCommutative(getOpcode());
940 unsigned Instruction::getNumSuccessors() const {
941 switch (getOpcode()) {
942 #define HANDLE_TERM_INST(N, OPC, CLASS) \
943 case Instruction::OPC: \
944 return static_cast<const CLASS *>(this)->getNumSuccessors();
945 #include "llvm/IR/Instruction.def"
946 default:
947 break;
949 llvm_unreachable("not a terminator");
952 BasicBlock *Instruction::getSuccessor(unsigned idx) const {
953 switch (getOpcode()) {
954 #define HANDLE_TERM_INST(N, OPC, CLASS) \
955 case Instruction::OPC: \
956 return static_cast<const CLASS *>(this)->getSuccessor(idx);
957 #include "llvm/IR/Instruction.def"
958 default:
959 break;
961 llvm_unreachable("not a terminator");
964 void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
965 switch (getOpcode()) {
966 #define HANDLE_TERM_INST(N, OPC, CLASS) \
967 case Instruction::OPC: \
968 return static_cast<CLASS *>(this)->setSuccessor(idx, B);
969 #include "llvm/IR/Instruction.def"
970 default:
971 break;
973 llvm_unreachable("not a terminator");
976 void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
977 for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
978 Idx != NumSuccessors; ++Idx)
979 if (getSuccessor(Idx) == OldBB)
980 setSuccessor(Idx, NewBB);
983 Instruction *Instruction::cloneImpl() const {
984 llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
987 void Instruction::swapProfMetadata() {
988 MDNode *ProfileData = getBranchWeightMDNode(*this);
989 if (!ProfileData || ProfileData->getNumOperands() != 3)
990 return;
992 // The first operand is the name. Fetch them backwards and build a new one.
993 Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
994 ProfileData->getOperand(1)};
995 setMetadata(LLVMContext::MD_prof,
996 MDNode::get(ProfileData->getContext(), Ops));
999 void Instruction::copyMetadata(const Instruction &SrcInst,
1000 ArrayRef<unsigned> WL) {
1001 if (!SrcInst.hasMetadata())
1002 return;
1004 DenseSet<unsigned> WLS;
1005 for (unsigned M : WL)
1006 WLS.insert(M);
1008 // Otherwise, enumerate and copy over metadata from the old instruction to the
1009 // new one.
1010 SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
1011 SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
1012 for (const auto &MD : TheMDs) {
1013 if (WL.empty() || WLS.count(MD.first))
1014 setMetadata(MD.first, MD.second);
1016 if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
1017 setDebugLoc(SrcInst.getDebugLoc());
1020 Instruction *Instruction::clone() const {
1021 Instruction *New = nullptr;
1022 switch (getOpcode()) {
1023 default:
1024 llvm_unreachable("Unhandled Opcode.");
1025 #define HANDLE_INST(num, opc, clas) \
1026 case Instruction::opc: \
1027 New = cast<clas>(this)->cloneImpl(); \
1028 break;
1029 #include "llvm/IR/Instruction.def"
1030 #undef HANDLE_INST
1033 New->SubclassOptionalData = SubclassOptionalData;
1034 New->copyMetadata(*this);
1035 return New;