//===-- Instruction.cpp - Implement the Instruction class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instruction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
using namespace llvm;

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    insertInto(BB, InsertBefore->getIterator());
  }
}

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // append this instruction into the basic block
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  insertInto(InsertAtEnd, InsertAtEnd->end());
}

Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));

  // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s)
  // mapping in LLVMContext.
  setMetadata(LLVMContext::MD_DIAssignID, nullptr);
}
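
// Note for readers (an informal gloss, not normative): after the handleRAUW
// call above, a debug intrinsic that still referred to this instruction, e.g.
//   call void @llvm.dbg.value(metadata i32 %x, ...)
// now refers to "metadata i32 undef" rather than a dangling pointer, so the
// debug record survives but is explicitly unknown.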

void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}

const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}

void Instruction::removeFromParent() {
  // Perform any debug-info maintenance required.
  handleMarkerRemoval();

  getParent()->getInstList().remove(getIterator());
}

void Instruction::handleMarkerRemoval() {
  if (!Parent->IsNewDbgInfoFormat || !DbgMarker)
    return;

  DbgMarker->removeMarker();
}

BasicBlock::iterator Instruction::eraseFromParent() {
  handleMarkerRemoval();
  return getParent()->getInstList().erase(getIterator());
}

void Instruction::insertBefore(Instruction *InsertPos) {
  insertBefore(InsertPos->getIterator());
}

/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(BasicBlock::iterator InsertPos) {
  insertBefore(*InsertPos->getParent(), InsertPos);
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  BasicBlock *DestParent = InsertPos->getParent();

  DestParent->getInstList().insertAfter(InsertPos->getIterator(), this);

  // No need to manually update DPValues: if we insert after an instruction
  // position, then we can never have any DPValues on "this".
  if (DestParent->IsNewDbgInfoFormat)
    DestParent->createMarker(this);
}

BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB,
                                             BasicBlock::iterator It) {
  assert(getParent() == nullptr && "Expected detached instruction");
  assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
         "It not in ParentBB");
  insertBefore(*ParentBB, It);
  return getIterator();
}

extern cl::opt<bool> UseNewDbgInfoFormat;

void Instruction::insertBefore(BasicBlock &BB,
                               InstListType::iterator InsertPos) {
  assert(!DbgMarker);

  BB.getInstList().insert(InsertPos, this);

  if (!BB.IsNewDbgInfoFormat)
    return;

  BB.createMarker(this);

  // We've inserted "this": if InsertAtHead is set then it comes before any
  // DPValues attached to InsertPos. But if it's not set, then any DPValues
  // should now come before "this".
  bool InsertAtHead = InsertPos.getHeadBit();
  if (!InsertAtHead) {
    DPMarker *SrcMarker = BB.getMarker(InsertPos);
    // If there's no source marker, InsertPos is very likely end().
    if (SrcMarker)
      DbgMarker->absorbDebugValues(*SrcMarker, false);
  }

  // If we're inserting a terminator, check if we need to flush out
  // TrailingDPValues.
  if (isTerminator())
    getParent()->flushTerminatorDbgValues();
}
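
// Informal sketch of the head-bit convention used above: with debug records D
// attached ahead of the instruction at InsertPos, inserting a new instruction
// N with the head bit set yields "N, D, InsertPos", while inserting without
// it yields "D, N, InsertPos" (D is absorbed onto N's own marker).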

/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), false);
}

void Instruction::moveBeforePreserving(Instruction *MovePos) {
  moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), true);
}

void Instruction::moveAfter(Instruction *MovePos) {
  auto NextIt = std::next(MovePos->getIterator());
  // We want this instruction to be moved to before NextIt in the instruction
  // list, but before NextIt's debug value range.
  NextIt.setHeadBit(true);
  moveBeforeImpl(*MovePos->getParent(), NextIt, false);
}

void Instruction::moveAfterPreserving(Instruction *MovePos) {
  auto NextIt = std::next(MovePos->getIterator());
  // We want this instruction and its debug range to be moved to before NextIt
  // in the instruction list, but before NextIt's debug value range.
  NextIt.setHeadBit(true);
  moveBeforeImpl(*MovePos->getParent(), NextIt, true);
}

void Instruction::moveBefore(BasicBlock &BB, InstListType::iterator I) {
  moveBeforeImpl(BB, I, false);
}

void Instruction::moveBeforePreserving(BasicBlock &BB,
                                       InstListType::iterator I) {
  moveBeforeImpl(BB, I, true);
}

void Instruction::moveBeforeImpl(BasicBlock &BB, InstListType::iterator I,
                                 bool Preserve) {
  assert(I == BB.end() || I->getParent() == &BB);
  bool InsertAtHead = I.getHeadBit();

  // If we've been given the "Preserve" flag, then just move the DPValues with
  // the instruction, no more special handling needed.
  if (BB.IsNewDbgInfoFormat && DbgMarker && !Preserve) {
    if (I != this->getIterator() || InsertAtHead) {
      // "this" is definitely moving in the list, or it's moving ahead of its
      // attached DPValues. Detach any existing DPValues.
      handleMarkerRemoval();
    }
  }

  // Move this single instruction. Use the list splice method directly, not
  // the block splicer, which will do more debug-info things.
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());

  if (BB.IsNewDbgInfoFormat && !Preserve) {
    if (!DbgMarker)
      BB.createMarker(this);
    DPMarker *NextMarker = getParent()->getNextMarker(this);

    // If we're inserting at point I, and not in front of the DPValues attached
    // there, then we should absorb the DPValues attached to I.
    if (NextMarker && !InsertAtHead)
      DbgMarker->absorbDebugValues(*NextMarker, false);
  }

  if (isTerminator())
    getParent()->flushTerminatorDbgValues();
}

iterator_range<DPValue::self_iterator>
Instruction::cloneDebugInfoFrom(const Instruction *From,
                                std::optional<DPValue::self_iterator> FromHere,
                                bool InsertAtHead) {
  if (!From->DbgMarker)
    return DPMarker::getEmptyDPValueRange();

  assert(getParent()->IsNewDbgInfoFormat);
  assert(getParent()->IsNewDbgInfoFormat ==
         From->getParent()->IsNewDbgInfoFormat);

  if (!DbgMarker)
    getParent()->createMarker(this);

  return DbgMarker->cloneDebugInfoFrom(From->DbgMarker, FromHere, InsertAtHead);
}

iterator_range<DPValue::self_iterator>
Instruction::getDbgValueRange() const {
  BasicBlock *Parent = const_cast<BasicBlock *>(getParent());
  assert(Parent && "Instruction must be inserted to have DPValues");
  (void)Parent;

  if (!DbgMarker)
    return DPMarker::getEmptyDPValueRange();

  return DbgMarker->getDbgValueRange();
}

std::optional<DPValue::self_iterator> Instruction::getDbgReinsertionPosition() {
  // Is there a marker on the next instruction?
  DPMarker *NextMarker = getParent()->getNextMarker(this);
  if (!NextMarker)
    return std::nullopt;

  // Are there any DPValues in the next marker?
  if (NextMarker->StoredDPValues.empty())
    return std::nullopt;

  return NextMarker->StoredDPValues.begin();
}

bool Instruction::hasDbgValues() const { return !getDbgValueRange().empty(); }

void Instruction::dropDbgValues() {
  if (DbgMarker)
    DbgMarker->dropDPValues();
}

void Instruction::dropOneDbgValue(DPValue *DPV) {
  DbgMarker->dropOneDPValue(DPV);
}

bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}

std::optional<BasicBlock::iterator> Instruction::getInsertionPointAfterDef() {
  assert(!getType()->isVoidTy() && "Instruction must define result");
  BasicBlock *InsertBB;
  BasicBlock::iterator InsertPt;
  if (auto *PN = dyn_cast<PHINode>(this)) {
    InsertBB = PN->getParent();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (auto *II = dyn_cast<InvokeInst>(this)) {
    InsertBB = II->getNormalDest();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (isa<CallBrInst>(this)) {
    // Def is available in multiple successors, there's no single dominating
    // insertion point.
    return std::nullopt;
  } else {
    assert(!isTerminator() && "Only invoke/callbr terminators return value");
    InsertBB = getParent();
    InsertPt = std::next(getIterator());
    // Any instruction inserted immediately after "this" will come before any
    // debug-info records take effect -- thus, set the head bit indicating that
    // to debug-info-transfer code.
    InsertPt.setHeadBit(true);
  }

  // catchswitch blocks don't have any legal insertion point (because they
  // are both an exception pad and a terminator).
  if (InsertPt == InsertBB->end())
    return std::nullopt;
  return InsertPt;
}
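
// A typical caller pattern (informal sketch, names are illustrative): a pass
// that wants to place a new instruction right after this definition can do
//   if (auto IP = Def->getInsertionPointAfterDef())
//     NewInst->insertBefore(*(*IP)->getParent(), *IP);
//   else
//     ; // bail out: callbr/catchswitch offer no single legal point.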

bool Instruction::isOnlyUserOfAnyOperand() {
  return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
}

void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

void Instruction::setNonNeg(bool b) {
  assert(isa<PossiblyNonNegInst>(this) && "Must be zext");
  SubclassOptionalData = (SubclassOptionalData & ~PossiblyNonNegInst::NonNeg) |
                         (b * PossiblyNonNegInst::NonNeg);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}

bool Instruction::hasNonNeg() const {
  assert(isa<PossiblyNonNegInst>(this) && "Must be zext");
  return (SubclassOptionalData & PossiblyNonNegInst::NonNeg) != 0;
}

bool Instruction::hasPoisonGeneratingFlags() const {
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
}

void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::Or:
    cast<PossiblyDisjointInst>(this)->setIsDisjoint(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;

  case Instruction::ZExt:
    setNonNeg(false);
    break;
  }

  if (isa<FPMathOperator>(this)) {
    setHasNoNaNs(false);
    setHasNoInfs(false);
  }

  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}
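
// For illustration (informal, not from this file): dropPoisonGeneratingFlags
// turns
//   %a = add nuw nsw i64 %x, %y   into   %a = add i64 %x, %y
//   %p = getelementptr inbounds i8, ptr %b, i64 %o
//                                 into   %p = getelementptr i8, ptr %b, i64 %o
// so an instruction hoisted to a new location cannot introduce poison through
// flags whose guarantees no longer hold there.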

bool Instruction::hasPoisonGeneratingMetadata() const {
  return hasMetadata(LLVMContext::MD_range) ||
         hasMetadata(LLVMContext::MD_nonnull) ||
         hasMetadata(LLVMContext::MD_align);
}

void Instruction::dropPoisonGeneratingMetadata() {
  eraseMetadata(LLVMContext::MD_range);
  eraseMetadata(LLVMContext::MD_nonnull);
  eraseMetadata(LLVMContext::MD_align);
}

void Instruction::dropUBImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return
  // attributes that can cause UB if the call is moved to a location where the
  // attribute is not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttributeMask UBImplyingAttributes =
      AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}

void Instruction::dropUBImplyingAttrsAndMetadata() {
  // !annotation metadata does not impact semantics.
  // !range, !nonnull and !align produce poison, so they are safe to speculate.
  // !noundef and various AA metadata must be dropped, as they generally produce
  // immediate undefined behavior.
  unsigned KnownIDs[] = {LLVMContext::MD_annotation, LLVMContext::MD_range,
                         LLVMContext::MD_nonnull, LLVMContext::MD_align};
  dropUBImplyingAttrsAndUnknownMetadata(KnownIDs);
}
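
// Informal example of the policy above: on a call such as
//   %r = call noundef ptr @f(ptr dereferenceable(8) %p), !nonnull !0, !noundef !1
// the UB-implying parts (the noundef/dereferenceable-style attributes and the
// !noundef metadata, which is not in KnownIDs) are dropped, while !nonnull is
// kept because violating it only yields poison.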

bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}

void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
    if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
      DestPD->setIsDisjoint(SrcPD->isDisjoint());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());

  if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
    if (isa<PossiblyNonNegInst>(this))
      setNonNeg(NNI->hasNonNeg());
}

void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
    if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
      DestPD->setIsDisjoint(DestPD->isDisjoint() && SrcPD->isDisjoint());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());

  if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
    if (isa<PossiblyNonNegInst>(this))
      setNonNeg(hasNonNeg() && NNI->hasNonNeg());
}
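
// Informal example: andIRFlags keeps only the flags common to both values,
// e.g. combining "add nuw nsw" with "add nuw" leaves "add nuw"; fast-math
// flags are intersected the same way via the FastMathFlags &= above.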

const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret: return "ret";
  case Br: return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca: return "alloca";
  case Load: return "load";
  case Store: return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW: return "atomicrmw";
  case Fence: return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc: return "trunc";
  case ZExt: return "zext";
  case SExt: return "sext";
  case FPTrunc: return "fptrunc";
  case FPExt: return "fpext";
  case FPToUI: return "fptoui";
  case FPToSI: return "fptosi";
  case UIToFP: return "uitofp";
  case SIToFP: return "sitofp";
  case IntToPtr: return "inttoptr";
  case PtrToInt: return "ptrtoint";
  case BitCast: return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp: return "icmp";
  case FCmp: return "fcmp";
  case PHI: return "phi";
  case Select: return "select";
  case Call: return "call";
  case Shl: return "shl";
  case LShr: return "lshr";
  case AShr: return "ashr";
  case VAArg: return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement: return "insertelement";
  case ShuffleVector: return "shufflevector";
  case ExtractValue: return "extractvalue";
  case InsertValue: return "insertvalue";
  case LandingPad: return "landingpad";
  case CleanupPad: return "cleanuppad";
  case Freeze: return "freeze";

  default: return "<Invalid operator> ";
  }
}

/// This must be kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::hasSameSpecialState(const Instruction *I2,
                                      bool IgnoreAlignment) const {
  auto I1 = this;
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
    return GEP->getSourceElementType() ==
           cast<GetElementPtrInst>(I2)->getSourceElementType();

  return true;
}

bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}

bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return this->hasSameSpecialState(I);

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return this->hasSameSpecialState(I);
}

// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same type.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return this->hasSameSpecialState(I, IgnoreAlignment);
}

bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // PHI nodes use values in the corresponding predecessor block. For other
    // instructions, just check to see whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}

bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyWritesMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}

bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}

bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}

bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}

bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}

bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}

Type *Instruction::getAccessType() const {
  switch (getOpcode()) {
  case Instruction::Store:
    return cast<StoreInst>(this)->getValueOperand()->getType();
  case Instruction::Load:
  case Instruction::AtomicRMW:
    return getType();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->getNewValOperand()->getType();
  case Instruction::Call:
  case Instruction::Invoke:
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(this)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::masked_load:
      case Intrinsic::masked_gather:
      case Intrinsic::masked_expandload:
      case Intrinsic::vp_load:
      case Intrinsic::vp_gather:
      case Intrinsic::experimental_vp_strided_load:
        return II->getType();
      case Intrinsic::masked_store:
      case Intrinsic::masked_scatter:
      case Intrinsic::masked_compressstore:
      case Intrinsic::vp_store:
      case Intrinsic::vp_scatter:
      case Intrinsic::experimental_vp_strided_store:
        return II->getOperand(0)->getType();
      default:
        break;
      }
    }
  }

  return nullptr;
}

static bool canUnwindPastLandingPad(const LandingPadInst *LP,
                                    bool IncludePhaseOneUnwind) {
  // Because phase one unwinding skips cleanup landingpads, we effectively
  // unwind past this frame, and callers need to have valid unwind info.
  if (LP->isCleanup())
    return IncludePhaseOneUnwind;

  for (unsigned I = 0; I < LP->getNumClauses(); ++I) {
    Constant *Clause = LP->getClause(I);
    // catch ptr null catches all exceptions.
    if (LP->isCatch(I) && isa<ConstantPointerNull>(Clause))
      return false;
    // filter [0 x ptr] catches all exceptions.
    if (LP->isFilter(I) && Clause->getType()->getArrayNumElements() == 0)
      return false;
  }

  // May catch only some subset of exceptions, in which case other exceptions
  // will continue unwinding.
  return true;
}
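
// Informal IR illustration of the two catch-all forms recognized above:
//   %lp = landingpad { ptr, i32 } catch ptr null
//   %lp = landingpad { ptr, i32 } filter [0 x ptr] zeroinitializer
// Either clause stops the unwind in this frame, so an invoke whose unwind
// destination starts with such a landingpad is not considered may-throw.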

bool Instruction::mayThrow(bool IncludePhaseOneUnwind) const {
  switch (getOpcode()) {
  case Instruction::Call:
    return !cast<CallInst>(this)->doesNotThrow();
  case Instruction::CleanupRet:
    return cast<CleanupReturnInst>(this)->unwindsToCaller();
  case Instruction::CatchSwitch:
    return cast<CatchSwitchInst>(this)->unwindsToCaller();
  case Instruction::Resume:
    return true;
  case Instruction::Invoke: {
    // Landingpads themselves don't unwind -- however, an invoke of a skipped
    // landingpad may continue unwinding.
    BasicBlock *UnwindDest = cast<InvokeInst>(this)->getUnwindDest();
    Instruction *Pad = UnwindDest->getFirstNonPHI();
    if (auto *LP = dyn_cast<LandingPadInst>(Pad))
      return canUnwindPastLandingPad(LP, IncludePhaseOneUnwind);
    return false;
  }
  case Instruction::CleanupPad:
    // Treat the same as cleanup landingpad.
    return IncludePhaseOneUnwind;
  default:
    return false;
  }
}

bool Instruction::mayHaveSideEffects() const {
  return mayWriteToMemory() || mayThrow() || !willReturn();
}

bool Instruction::isSafeToRemove() const {
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
         !this->isTerminator() && !this->isEHPad();
}

bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  if (const auto *CB = dyn_cast<CallBase>(this))
    return CB->hasFnAttr(Attribute::WillReturn);
  return true;
}

bool Instruction::isLifetimeStartOrEnd() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}

bool Instruction::isLaunderOrStripInvariantGroup() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::launder_invariant_group ||
         ID == Intrinsic::strip_invariant_group;
}

bool Instruction::isDebugOrPseudoInst() const {
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}

const Instruction *
Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

const Instruction *
Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

const DebugLoc &Instruction::getStableDebugLoc() const {
  if (isa<DbgInfoIntrinsic>(this))
    if (const Instruction *Next = getNextNonDebugInstruction())
      return Next->getDebugLoc();
  return getDebugLoc();
}

bool Instruction::isAssociative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isAssociative();
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}

bool Instruction::isCommutative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutative();
  // TODO: Should allow icmp/fcmp?
  return isCommutative(getOpcode());
}

unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
  for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
       Idx != NumSuccessors; ++Idx)
    if (getSuccessor(Idx) == OldBB)
      setSuccessor(Idx, NewBB);
}

Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}

void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getBranchWeightMDNode(*this);
  if (!ProfileData || ProfileData->getNumOperands() != 3)
    return;

  // The first operand is the name; the second and third are the weights. Swap
  // the two weights and build a new node.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}
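
// Informal example: a conditional branch carrying
//   !prof !{!"branch_weights", i32 4, i32 64}
// ends up with
//   !prof !{!"branch_weights", i32 64, i32 4}
// matching callers that swap a terminator's successors or invert its
// condition.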

void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  DenseSet<unsigned> WLS;
  for (unsigned M : WL)
    WLS.insert(M);

  // Otherwise, enumerate and copy over metadata from the old instruction to the
  // new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}

Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}