[libcxx][test] Fix a test for the range of file offsets on ARMv7 Linux targets. ...
[llvm-project.git] / llvm / lib / IR / Instruction.cpp
blob9eaae62a6390b72d644223a0cbb12b2b99e8e98d
1 //===-- Instruction.cpp - Implement the Instruction class -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the Instruction class for the IR library.
11 //===----------------------------------------------------------------------===//
13 #include "llvm/IR/Instruction.h"
14 #include "llvm/ADT/DenseSet.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/IR/AttributeMask.h"
17 #include "llvm/IR/Attributes.h"
18 #include "llvm/IR/Constants.h"
19 #include "llvm/IR/InstrTypes.h"
20 #include "llvm/IR/Instructions.h"
21 #include "llvm/IR/IntrinsicInst.h"
22 #include "llvm/IR/Intrinsics.h"
23 #include "llvm/IR/LLVMContext.h"
24 #include "llvm/IR/MemoryModelRelaxationAnnotations.h"
25 #include "llvm/IR/Module.h"
26 #include "llvm/IR/Operator.h"
27 #include "llvm/IR/ProfDataUtils.h"
28 #include "llvm/IR/Type.h"
29 using namespace llvm;
31 InsertPosition::InsertPosition(Instruction *InsertBefore)
32 : InsertAt(InsertBefore ? InsertBefore->getIterator()
33 : InstListType::iterator()) {}
34 InsertPosition::InsertPosition(BasicBlock *InsertAtEnd)
35 : InsertAt(InsertAtEnd ? InsertAtEnd->end() : InstListType::iterator()) {}
37 Instruction::Instruction(Type *ty, unsigned it, AllocInfo AllocInfo,
38 InsertPosition InsertBefore)
39 : User(ty, Value::InstructionVal + it, AllocInfo) {
40 // When called with an iterator, there must be a block to insert into.
41 if (InstListType::iterator InsertIt = InsertBefore; InsertIt.isValid()) {
42 BasicBlock *BB = InsertIt.getNodeParent();
43 assert(BB && "Instruction to insert before is not in a basic block!");
44 insertInto(BB, InsertBefore);
48 Instruction::~Instruction() {
49 assert(!getParent() && "Instruction still linked in the program!");
51 // Replace any extant metadata uses of this instruction with undef to
52 // preserve debug info accuracy. Some alternatives include:
53 // - Treat Instruction like any other Value, and point its extant metadata
54 // uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
55 // trivially dead (i.e. fair game for deletion in many passes), leading to
56 // stale dbg.values being in effect for too long.
57 // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
58 // correct. OTOH results in wasted work in some common cases (e.g. when all
59 // instructions in a BasicBlock are deleted).
60 if (isUsedByMetadata())
61 ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
63 // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s)
64 // mapping in LLVMContext.
65 setMetadata(LLVMContext::MD_DIAssignID, nullptr);
68 const Module *Instruction::getModule() const {
69 return getParent()->getModule();
72 const Function *Instruction::getFunction() const {
73 return getParent()->getParent();
76 const DataLayout &Instruction::getDataLayout() const {
77 return getModule()->getDataLayout();
80 void Instruction::removeFromParent() {
81 // Perform any debug-info maintenence required.
82 handleMarkerRemoval();
84 getParent()->getInstList().remove(getIterator());
87 void Instruction::handleMarkerRemoval() {
88 if (!getParent()->IsNewDbgInfoFormat || !DebugMarker)
89 return;
91 DebugMarker->removeMarker();
94 BasicBlock::iterator Instruction::eraseFromParent() {
95 handleMarkerRemoval();
96 return getParent()->getInstList().erase(getIterator());
99 void Instruction::insertBefore(Instruction *InsertPos) {
100 insertBefore(InsertPos->getIterator());
103 /// Insert an unlinked instruction into a basic block immediately before the
104 /// specified instruction.
105 void Instruction::insertBefore(BasicBlock::iterator InsertPos) {
106 insertBefore(*InsertPos->getParent(), InsertPos);
109 /// Insert an unlinked instruction into a basic block immediately after the
110 /// specified instruction.
111 void Instruction::insertAfter(Instruction *InsertPos) {
112 BasicBlock *DestParent = InsertPos->getParent();
114 DestParent->getInstList().insertAfter(InsertPos->getIterator(), this);
117 BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB,
118 BasicBlock::iterator It) {
119 assert(getParent() == nullptr && "Expected detached instruction");
120 assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
121 "It not in ParentBB");
122 insertBefore(*ParentBB, It);
123 return getIterator();
126 extern cl::opt<bool> UseNewDbgInfoFormat;
128 void Instruction::insertBefore(BasicBlock &BB,
129 InstListType::iterator InsertPos) {
130 assert(!DebugMarker);
132 BB.getInstList().insert(InsertPos, this);
134 if (!BB.IsNewDbgInfoFormat)
135 return;
137 // We've inserted "this": if InsertAtHead is set then it comes before any
138 // DbgVariableRecords attached to InsertPos. But if it's not set, then any
139 // DbgRecords should now come before "this".
140 bool InsertAtHead = InsertPos.getHeadBit();
141 if (!InsertAtHead) {
142 DbgMarker *SrcMarker = BB.getMarker(InsertPos);
143 if (SrcMarker && !SrcMarker->empty()) {
144 // If this assertion fires, the calling code is about to insert a PHI
145 // after debug-records, which would form a sequence like:
146 // %0 = PHI
147 // #dbg_value
148 // %1 = PHI
149 // Which is de-normalised and undesired -- hence the assertion. To avoid
150 // this, you must insert at that position using an iterator, and it must
151 // be aquired by calling getFirstNonPHIIt / begin or similar methods on
152 // the block. This will signal to this behind-the-scenes debug-info
153 // maintenence code that you intend the PHI to be ahead of everything,
154 // including any debug-info.
155 assert(!isa<PHINode>(this) && "Inserting PHI after debug-records!");
156 adoptDbgRecords(&BB, InsertPos, false);
160 // If we're inserting a terminator, check if we need to flush out
161 // TrailingDbgRecords. Inserting instructions at the end of an incomplete
162 // block is handled by the code block above.
163 if (isTerminator())
164 getParent()->flushTerminatorDbgRecords();
167 /// Unlink this instruction from its current basic block and insert it into the
168 /// basic block that MovePos lives in, right before MovePos.
169 void Instruction::moveBefore(Instruction *MovePos) {
170 moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), false);
173 void Instruction::moveBeforePreserving(Instruction *MovePos) {
174 moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), true);
177 void Instruction::moveAfter(Instruction *MovePos) {
178 auto NextIt = std::next(MovePos->getIterator());
179 // We want this instruction to be moved to before NextIt in the instruction
180 // list, but before NextIt's debug value range.
181 NextIt.setHeadBit(true);
182 moveBeforeImpl(*MovePos->getParent(), NextIt, false);
185 void Instruction::moveAfterPreserving(Instruction *MovePos) {
186 auto NextIt = std::next(MovePos->getIterator());
187 // We want this instruction and its debug range to be moved to before NextIt
188 // in the instruction list, but before NextIt's debug value range.
189 NextIt.setHeadBit(true);
190 moveBeforeImpl(*MovePos->getParent(), NextIt, true);
193 void Instruction::moveBefore(BasicBlock &BB, InstListType::iterator I) {
194 moveBeforeImpl(BB, I, false);
197 void Instruction::moveBeforePreserving(BasicBlock &BB,
198 InstListType::iterator I) {
199 moveBeforeImpl(BB, I, true);
202 void Instruction::moveBeforeImpl(BasicBlock &BB, InstListType::iterator I,
203 bool Preserve) {
204 assert(I == BB.end() || I->getParent() == &BB);
205 bool InsertAtHead = I.getHeadBit();
207 // If we've been given the "Preserve" flag, then just move the DbgRecords with
208 // the instruction, no more special handling needed.
209 if (BB.IsNewDbgInfoFormat && DebugMarker && !Preserve) {
210 if (I != this->getIterator() || InsertAtHead) {
211 // "this" is definitely moving in the list, or it's moving ahead of its
212 // attached DbgVariableRecords. Detach any existing DbgRecords.
213 handleMarkerRemoval();
217 // Move this single instruction. Use the list splice method directly, not
218 // the block splicer, which will do more debug-info things.
219 BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
221 if (BB.IsNewDbgInfoFormat && !Preserve) {
222 DbgMarker *NextMarker = getParent()->getNextMarker(this);
224 // If we're inserting at point I, and not in front of the DbgRecords
225 // attached there, then we should absorb the DbgRecords attached to I.
226 if (!InsertAtHead && NextMarker && !NextMarker->empty()) {
227 adoptDbgRecords(&BB, I, false);
231 if (isTerminator())
232 getParent()->flushTerminatorDbgRecords();
235 iterator_range<DbgRecord::self_iterator> Instruction::cloneDebugInfoFrom(
236 const Instruction *From, std::optional<DbgRecord::self_iterator> FromHere,
237 bool InsertAtHead) {
238 if (!From->DebugMarker)
239 return DbgMarker::getEmptyDbgRecordRange();
241 assert(getParent()->IsNewDbgInfoFormat);
242 assert(getParent()->IsNewDbgInfoFormat ==
243 From->getParent()->IsNewDbgInfoFormat);
245 if (!DebugMarker)
246 getParent()->createMarker(this);
248 return DebugMarker->cloneDebugInfoFrom(From->DebugMarker, FromHere,
249 InsertAtHead);
252 std::optional<DbgRecord::self_iterator>
253 Instruction::getDbgReinsertionPosition() {
254 // Is there a marker on the next instruction?
255 DbgMarker *NextMarker = getParent()->getNextMarker(this);
256 if (!NextMarker)
257 return std::nullopt;
259 // Are there any DbgRecords in the next marker?
260 if (NextMarker->StoredDbgRecords.empty())
261 return std::nullopt;
263 return NextMarker->StoredDbgRecords.begin();
266 bool Instruction::hasDbgRecords() const { return !getDbgRecordRange().empty(); }
268 void Instruction::adoptDbgRecords(BasicBlock *BB, BasicBlock::iterator It,
269 bool InsertAtHead) {
270 DbgMarker *SrcMarker = BB->getMarker(It);
271 auto ReleaseTrailingDbgRecords = [BB, It, SrcMarker]() {
272 if (BB->end() == It) {
273 SrcMarker->eraseFromParent();
274 BB->deleteTrailingDbgRecords();
278 if (!SrcMarker || SrcMarker->StoredDbgRecords.empty()) {
279 ReleaseTrailingDbgRecords();
280 return;
283 // If we have DbgMarkers attached to this instruction, we have to honour the
284 // ordering of DbgRecords between this and the other marker. Fall back to just
285 // absorbing from the source.
286 if (DebugMarker || It == BB->end()) {
287 // Ensure we _do_ have a marker.
288 getParent()->createMarker(this);
289 DebugMarker->absorbDebugValues(*SrcMarker, InsertAtHead);
291 // Having transferred everything out of SrcMarker, we _could_ clean it up
292 // and free the marker now. However, that's a lot of heap-accounting for a
293 // small amount of memory with a good chance of re-use. Leave it for the
294 // moment. It will be released when the Instruction is freed in the worst
295 // case.
296 // However: if we transferred from a trailing marker off the end of the
297 // block, it's important to not leave the empty marker trailing. It will
298 // give a misleading impression that some debug records have been left
299 // trailing.
300 ReleaseTrailingDbgRecords();
301 } else {
302 // Optimisation: we're transferring all the DbgRecords from the source
303 // marker onto this empty location: just adopt the other instructions
304 // marker.
305 DebugMarker = SrcMarker;
306 DebugMarker->MarkedInstr = this;
307 It->DebugMarker = nullptr;
311 void Instruction::dropDbgRecords() {
312 if (DebugMarker)
313 DebugMarker->dropDbgRecords();
316 void Instruction::dropOneDbgRecord(DbgRecord *DVR) {
317 DebugMarker->dropOneDbgRecord(DVR);
320 bool Instruction::comesBefore(const Instruction *Other) const {
321 assert(getParent() && Other->getParent() &&
322 "instructions without BB parents have no order");
323 assert(getParent() == Other->getParent() &&
324 "cross-BB instruction order comparison");
325 if (!getParent()->isInstrOrderValid())
326 const_cast<BasicBlock *>(getParent())->renumberInstructions();
327 return Order < Other->Order;
330 std::optional<BasicBlock::iterator> Instruction::getInsertionPointAfterDef() {
331 assert(!getType()->isVoidTy() && "Instruction must define result");
332 BasicBlock *InsertBB;
333 BasicBlock::iterator InsertPt;
334 if (auto *PN = dyn_cast<PHINode>(this)) {
335 InsertBB = PN->getParent();
336 InsertPt = InsertBB->getFirstInsertionPt();
337 } else if (auto *II = dyn_cast<InvokeInst>(this)) {
338 InsertBB = II->getNormalDest();
339 InsertPt = InsertBB->getFirstInsertionPt();
340 } else if (isa<CallBrInst>(this)) {
341 // Def is available in multiple successors, there's no single dominating
342 // insertion point.
343 return std::nullopt;
344 } else {
345 assert(!isTerminator() && "Only invoke/callbr terminators return value");
346 InsertBB = getParent();
347 InsertPt = std::next(getIterator());
348 // Any instruction inserted immediately after "this" will come before any
349 // debug-info records take effect -- thus, set the head bit indicating that
350 // to debug-info-transfer code.
351 InsertPt.setHeadBit(true);
354 // catchswitch blocks don't have any legal insertion point (because they
355 // are both an exception pad and a terminator).
356 if (InsertPt == InsertBB->end())
357 return std::nullopt;
358 return InsertPt;
361 bool Instruction::isOnlyUserOfAnyOperand() {
362 return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
365 void Instruction::setHasNoUnsignedWrap(bool b) {
366 if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
367 Inst->setHasNoUnsignedWrap(b);
368 else
369 cast<TruncInst>(this)->setHasNoUnsignedWrap(b);
372 void Instruction::setHasNoSignedWrap(bool b) {
373 if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
374 Inst->setHasNoSignedWrap(b);
375 else
376 cast<TruncInst>(this)->setHasNoSignedWrap(b);
379 void Instruction::setIsExact(bool b) {
380 cast<PossiblyExactOperator>(this)->setIsExact(b);
383 void Instruction::setNonNeg(bool b) {
384 assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
385 SubclassOptionalData = (SubclassOptionalData & ~PossiblyNonNegInst::NonNeg) |
386 (b * PossiblyNonNegInst::NonNeg);
389 bool Instruction::hasNoUnsignedWrap() const {
390 if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
391 return Inst->hasNoUnsignedWrap();
393 return cast<TruncInst>(this)->hasNoUnsignedWrap();
396 bool Instruction::hasNoSignedWrap() const {
397 if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
398 return Inst->hasNoSignedWrap();
400 return cast<TruncInst>(this)->hasNoSignedWrap();
403 bool Instruction::hasNonNeg() const {
404 assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
405 return (SubclassOptionalData & PossiblyNonNegInst::NonNeg) != 0;
408 bool Instruction::hasPoisonGeneratingFlags() const {
409 return cast<Operator>(this)->hasPoisonGeneratingFlags();
412 void Instruction::dropPoisonGeneratingFlags() {
413 switch (getOpcode()) {
414 case Instruction::Add:
415 case Instruction::Sub:
416 case Instruction::Mul:
417 case Instruction::Shl:
418 cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
419 cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
420 break;
422 case Instruction::UDiv:
423 case Instruction::SDiv:
424 case Instruction::AShr:
425 case Instruction::LShr:
426 cast<PossiblyExactOperator>(this)->setIsExact(false);
427 break;
429 case Instruction::Or:
430 cast<PossiblyDisjointInst>(this)->setIsDisjoint(false);
431 break;
433 case Instruction::GetElementPtr:
434 cast<GetElementPtrInst>(this)->setNoWrapFlags(GEPNoWrapFlags::none());
435 break;
437 case Instruction::UIToFP:
438 case Instruction::ZExt:
439 setNonNeg(false);
440 break;
442 case Instruction::Trunc:
443 cast<TruncInst>(this)->setHasNoUnsignedWrap(false);
444 cast<TruncInst>(this)->setHasNoSignedWrap(false);
445 break;
447 case Instruction::ICmp:
448 cast<ICmpInst>(this)->setSameSign(false);
449 break;
452 if (isa<FPMathOperator>(this)) {
453 setHasNoNaNs(false);
454 setHasNoInfs(false);
457 assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
460 bool Instruction::hasPoisonGeneratingMetadata() const {
461 return any_of(Metadata::PoisonGeneratingIDs,
462 [this](unsigned ID) { return hasMetadata(ID); });
465 bool Instruction::hasNonDebugLocLoopMetadata() const {
466 // If there is no loop metadata at all, we also don't have
467 // non-debug loop metadata, obviously.
468 if (!hasMetadata(LLVMContext::MD_loop))
469 return false;
471 // If we do have loop metadata, retrieve it.
472 MDNode *LoopMD = getMetadata(LLVMContext::MD_loop);
474 // Check if the existing operands are debug locations. This loop
475 // should terminate after at most three iterations. Skip
476 // the first item because it is a self-reference.
477 for (const MDOperand &Op : llvm::drop_begin(LoopMD->operands())) {
478 // check for debug location type by attempting a cast.
479 if (!dyn_cast<DILocation>(Op)) {
480 return true;
484 // If we get here, then all we have is debug locations in the loop metadata.
485 return false;
488 void Instruction::dropPoisonGeneratingMetadata() {
489 for (unsigned ID : Metadata::PoisonGeneratingIDs)
490 eraseMetadata(ID);
493 bool Instruction::hasPoisonGeneratingReturnAttributes() const {
494 if (const auto *CB = dyn_cast<CallBase>(this)) {
495 AttributeSet RetAttrs = CB->getAttributes().getRetAttrs();
496 return RetAttrs.hasAttribute(Attribute::Range) ||
497 RetAttrs.hasAttribute(Attribute::Alignment) ||
498 RetAttrs.hasAttribute(Attribute::NonNull);
500 return false;
503 void Instruction::dropPoisonGeneratingReturnAttributes() {
504 if (auto *CB = dyn_cast<CallBase>(this)) {
505 AttributeMask AM;
506 AM.addAttribute(Attribute::Range);
507 AM.addAttribute(Attribute::Alignment);
508 AM.addAttribute(Attribute::NonNull);
509 CB->removeRetAttrs(AM);
511 assert(!hasPoisonGeneratingReturnAttributes() && "must be kept in sync");
514 void Instruction::dropUBImplyingAttrsAndUnknownMetadata(
515 ArrayRef<unsigned> KnownIDs) {
516 dropUnknownNonDebugMetadata(KnownIDs);
517 auto *CB = dyn_cast<CallBase>(this);
518 if (!CB)
519 return;
520 // For call instructions, we also need to drop parameter and return attributes
521 // that are can cause UB if the call is moved to a location where the
522 // attribute is not valid.
523 AttributeList AL = CB->getAttributes();
524 if (AL.isEmpty())
525 return;
526 AttributeMask UBImplyingAttributes =
527 AttributeFuncs::getUBImplyingAttributes();
528 for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
529 CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
530 CB->removeRetAttrs(UBImplyingAttributes);
533 void Instruction::dropUBImplyingAttrsAndMetadata() {
534 // !annotation metadata does not impact semantics.
535 // !range, !nonnull and !align produce poison, so they are safe to speculate.
536 // !noundef and various AA metadata must be dropped, as it generally produces
537 // immediate undefined behavior.
538 unsigned KnownIDs[] = {LLVMContext::MD_annotation, LLVMContext::MD_range,
539 LLVMContext::MD_nonnull, LLVMContext::MD_align};
540 dropUBImplyingAttrsAndUnknownMetadata(KnownIDs);
543 bool Instruction::isExact() const {
544 return cast<PossiblyExactOperator>(this)->isExact();
547 void Instruction::setFast(bool B) {
548 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
549 cast<FPMathOperator>(this)->setFast(B);
552 void Instruction::setHasAllowReassoc(bool B) {
553 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
554 cast<FPMathOperator>(this)->setHasAllowReassoc(B);
557 void Instruction::setHasNoNaNs(bool B) {
558 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
559 cast<FPMathOperator>(this)->setHasNoNaNs(B);
562 void Instruction::setHasNoInfs(bool B) {
563 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
564 cast<FPMathOperator>(this)->setHasNoInfs(B);
567 void Instruction::setHasNoSignedZeros(bool B) {
568 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
569 cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
572 void Instruction::setHasAllowReciprocal(bool B) {
573 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
574 cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
577 void Instruction::setHasAllowContract(bool B) {
578 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
579 cast<FPMathOperator>(this)->setHasAllowContract(B);
582 void Instruction::setHasApproxFunc(bool B) {
583 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
584 cast<FPMathOperator>(this)->setHasApproxFunc(B);
587 void Instruction::setFastMathFlags(FastMathFlags FMF) {
588 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
589 cast<FPMathOperator>(this)->setFastMathFlags(FMF);
592 void Instruction::copyFastMathFlags(FastMathFlags FMF) {
593 assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
594 cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
597 bool Instruction::isFast() const {
598 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
599 return cast<FPMathOperator>(this)->isFast();
602 bool Instruction::hasAllowReassoc() const {
603 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
604 return cast<FPMathOperator>(this)->hasAllowReassoc();
607 bool Instruction::hasNoNaNs() const {
608 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
609 return cast<FPMathOperator>(this)->hasNoNaNs();
612 bool Instruction::hasNoInfs() const {
613 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
614 return cast<FPMathOperator>(this)->hasNoInfs();
617 bool Instruction::hasNoSignedZeros() const {
618 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
619 return cast<FPMathOperator>(this)->hasNoSignedZeros();
622 bool Instruction::hasAllowReciprocal() const {
623 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
624 return cast<FPMathOperator>(this)->hasAllowReciprocal();
627 bool Instruction::hasAllowContract() const {
628 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
629 return cast<FPMathOperator>(this)->hasAllowContract();
632 bool Instruction::hasApproxFunc() const {
633 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
634 return cast<FPMathOperator>(this)->hasApproxFunc();
637 FastMathFlags Instruction::getFastMathFlags() const {
638 assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
639 return cast<FPMathOperator>(this)->getFastMathFlags();
642 void Instruction::copyFastMathFlags(const Instruction *I) {
643 copyFastMathFlags(I->getFastMathFlags());
646 void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
647 // Copy the wrapping flags.
648 if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
649 if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
650 setHasNoSignedWrap(OB->hasNoSignedWrap());
651 setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
655 if (auto *TI = dyn_cast<TruncInst>(V)) {
656 if (isa<TruncInst>(this)) {
657 setHasNoSignedWrap(TI->hasNoSignedWrap());
658 setHasNoUnsignedWrap(TI->hasNoUnsignedWrap());
662 // Copy the exact flag.
663 if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
664 if (isa<PossiblyExactOperator>(this))
665 setIsExact(PE->isExact());
667 if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
668 if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
669 DestPD->setIsDisjoint(SrcPD->isDisjoint());
671 // Copy the fast-math flags.
672 if (auto *FP = dyn_cast<FPMathOperator>(V))
673 if (isa<FPMathOperator>(this))
674 copyFastMathFlags(FP->getFastMathFlags());
676 if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
677 if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
678 DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() |
679 DestGEP->getNoWrapFlags());
681 if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
682 if (isa<PossiblyNonNegInst>(this))
683 setNonNeg(NNI->hasNonNeg());
685 if (auto *SrcICmp = dyn_cast<ICmpInst>(V))
686 if (auto *DestICmp = dyn_cast<ICmpInst>(this))
687 DestICmp->setSameSign(SrcICmp->hasSameSign());
690 void Instruction::andIRFlags(const Value *V) {
691 if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
692 if (isa<OverflowingBinaryOperator>(this)) {
693 setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
694 setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
698 if (auto *TI = dyn_cast<TruncInst>(V)) {
699 if (isa<TruncInst>(this)) {
700 setHasNoSignedWrap(hasNoSignedWrap() && TI->hasNoSignedWrap());
701 setHasNoUnsignedWrap(hasNoUnsignedWrap() && TI->hasNoUnsignedWrap());
705 if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
706 if (isa<PossiblyExactOperator>(this))
707 setIsExact(isExact() && PE->isExact());
709 if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
710 if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
711 DestPD->setIsDisjoint(DestPD->isDisjoint() && SrcPD->isDisjoint());
713 if (auto *FP = dyn_cast<FPMathOperator>(V)) {
714 if (isa<FPMathOperator>(this)) {
715 FastMathFlags FM = getFastMathFlags();
716 FM &= FP->getFastMathFlags();
717 copyFastMathFlags(FM);
721 if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
722 if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
723 DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() &
724 DestGEP->getNoWrapFlags());
726 if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
727 if (isa<PossiblyNonNegInst>(this))
728 setNonNeg(hasNonNeg() && NNI->hasNonNeg());
730 if (auto *SrcICmp = dyn_cast<ICmpInst>(V))
731 if (auto *DestICmp = dyn_cast<ICmpInst>(this))
732 DestICmp->setSameSign(DestICmp->hasSameSign() && SrcICmp->hasSameSign());
735 const char *Instruction::getOpcodeName(unsigned OpCode) {
736 switch (OpCode) {
737 // Terminators
738 case Ret: return "ret";
739 case Br: return "br";
740 case Switch: return "switch";
741 case IndirectBr: return "indirectbr";
742 case Invoke: return "invoke";
743 case Resume: return "resume";
744 case Unreachable: return "unreachable";
745 case CleanupRet: return "cleanupret";
746 case CatchRet: return "catchret";
747 case CatchPad: return "catchpad";
748 case CatchSwitch: return "catchswitch";
749 case CallBr: return "callbr";
751 // Standard unary operators...
752 case FNeg: return "fneg";
754 // Standard binary operators...
755 case Add: return "add";
756 case FAdd: return "fadd";
757 case Sub: return "sub";
758 case FSub: return "fsub";
759 case Mul: return "mul";
760 case FMul: return "fmul";
761 case UDiv: return "udiv";
762 case SDiv: return "sdiv";
763 case FDiv: return "fdiv";
764 case URem: return "urem";
765 case SRem: return "srem";
766 case FRem: return "frem";
768 // Logical operators...
769 case And: return "and";
770 case Or : return "or";
771 case Xor: return "xor";
773 // Memory instructions...
774 case Alloca: return "alloca";
775 case Load: return "load";
776 case Store: return "store";
777 case AtomicCmpXchg: return "cmpxchg";
778 case AtomicRMW: return "atomicrmw";
779 case Fence: return "fence";
780 case GetElementPtr: return "getelementptr";
782 // Convert instructions...
783 case Trunc: return "trunc";
784 case ZExt: return "zext";
785 case SExt: return "sext";
786 case FPTrunc: return "fptrunc";
787 case FPExt: return "fpext";
788 case FPToUI: return "fptoui";
789 case FPToSI: return "fptosi";
790 case UIToFP: return "uitofp";
791 case SIToFP: return "sitofp";
792 case IntToPtr: return "inttoptr";
793 case PtrToInt: return "ptrtoint";
794 case BitCast: return "bitcast";
795 case AddrSpaceCast: return "addrspacecast";
797 // Other instructions...
798 case ICmp: return "icmp";
799 case FCmp: return "fcmp";
800 case PHI: return "phi";
801 case Select: return "select";
802 case Call: return "call";
803 case Shl: return "shl";
804 case LShr: return "lshr";
805 case AShr: return "ashr";
806 case VAArg: return "va_arg";
807 case ExtractElement: return "extractelement";
808 case InsertElement: return "insertelement";
809 case ShuffleVector: return "shufflevector";
810 case ExtractValue: return "extractvalue";
811 case InsertValue: return "insertvalue";
812 case LandingPad: return "landingpad";
813 case CleanupPad: return "cleanuppad";
814 case Freeze: return "freeze";
816 default: return "<Invalid operator> ";
820 /// This must be kept in sync with FunctionComparator::cmpOperations in
821 /// lib/Transforms/IPO/MergeFunctions.cpp.
822 bool Instruction::hasSameSpecialState(const Instruction *I2,
823 bool IgnoreAlignment,
824 bool IntersectAttrs) const {
825 auto I1 = this;
826 assert(I1->getOpcode() == I2->getOpcode() &&
827 "Can not compare special state of different instructions");
829 auto CheckAttrsSame = [IntersectAttrs](const CallBase *CB0,
830 const CallBase *CB1) {
831 return IntersectAttrs
832 ? CB0->getAttributes()
833 .intersectWith(CB0->getContext(), CB1->getAttributes())
834 .has_value()
835 : CB0->getAttributes() == CB1->getAttributes();
838 if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
839 return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
840 (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
841 IgnoreAlignment);
842 if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
843 return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
844 (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
845 IgnoreAlignment) &&
846 LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
847 LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
848 if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
849 return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
850 (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
851 IgnoreAlignment) &&
852 SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
853 SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
854 if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
855 return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
856 if (const CallInst *CI = dyn_cast<CallInst>(I1))
857 return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
858 CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
859 CheckAttrsSame(CI, cast<CallInst>(I2)) &&
860 CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
861 if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
862 return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
863 CheckAttrsSame(CI, cast<InvokeInst>(I2)) &&
864 CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
865 if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
866 return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
867 CheckAttrsSame(CI, cast<CallBrInst>(I2)) &&
868 CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
869 if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
870 return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
871 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
872 return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
873 if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
874 return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
875 FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
876 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
877 return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
878 CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
879 CXI->getSuccessOrdering() ==
880 cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
881 CXI->getFailureOrdering() ==
882 cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
883 CXI->getSyncScopeID() ==
884 cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
885 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
886 return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
887 RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
888 RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
889 RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
890 if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
891 return SVI->getShuffleMask() ==
892 cast<ShuffleVectorInst>(I2)->getShuffleMask();
893 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
894 return GEP->getSourceElementType() ==
895 cast<GetElementPtrInst>(I2)->getSourceElementType();
897 return true;
900 bool Instruction::isIdenticalTo(const Instruction *I) const {
901 return isIdenticalToWhenDefined(I) &&
902 SubclassOptionalData == I->SubclassOptionalData;
// Returns true if the two instructions would compute the same value for the
// same operands, ignoring the "defined-ness" carried by optional flags
// (SubclassOptionalData is deliberately not compared here).
bool Instruction::isIdenticalToWhenDefined(const Instruction *I,
                                           bool IntersectAttrs) const {
  // Cheap structural checks first: opcode, operand count, result type.
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() || getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return this->hasSameSpecialState(I, /*IgnoreAlignment=*/false,
                                     IntersectAttrs);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    // For PHIs, matching operands is not enough: the incoming blocks must
    // line up pairwise too, since a PHI's value depends on the edge taken.
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  // Finally compare opcode-specific state (volatility, ordering, attributes,
  // indices, ...).
  return this->hasSameSpecialState(I, /*IgnoreAlignment=*/false,
                                   IntersectAttrs);
}
// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  // Decode the comparison options packed into `flags`.
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes = flags & CompareUsingScalarTypes;
  bool IntersectAttrs = flags & CompareUsingIntersectedAttrs;

  // Opcode, operand count, and result type must match; with
  // CompareUsingScalarTypes only the scalar element types are compared.
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  // Compare opcode-specific state, honoring the alignment/attribute options.
  return this->hasSameSpecialState(I, IgnoreAlignment, IntersectAttrs);
}
// Returns true if any use of this instruction is logically outside of BB.
bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // PHI nodes uses values in the corresponding predecessor block. For other
    // instructions, just check to see whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    // For a PHI, the use is attributed to the incoming edge's block, not to
    // the block containing the PHI itself.
    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}
// Conservatively returns true if this instruction may read from memory.
bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  // Calls may read unless they are known to only write memory (or less).
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyWritesMemory();
  // A volatile or atomic-ordered store is conservatively treated as a read
  // as well; only unordered stores are pure writes here.
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}
// Conservatively returns true if this instruction may write to memory.
bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  // Calls may write unless they are known to only read memory (or less).
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  // A volatile or atomic-ordered load is conservatively treated as a write
  // as well; only unordered loads are pure reads here.
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}
// Returns true if this instruction has atomic semantics.
bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  // These opcodes are inherently atomic.
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  // Loads and stores are atomic only when carrying an atomic ordering.
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}
1032 bool Instruction::hasAtomicLoad() const {
1033 assert(isAtomic());
1034 switch (getOpcode()) {
1035 default:
1036 return false;
1037 case Instruction::AtomicCmpXchg:
1038 case Instruction::AtomicRMW:
1039 case Instruction::Load:
1040 return true;
1044 bool Instruction::hasAtomicStore() const {
1045 assert(isAtomic());
1046 switch (getOpcode()) {
1047 default:
1048 return false;
1049 case Instruction::AtomicCmpXchg:
1050 case Instruction::AtomicRMW:
1051 case Instruction::Store:
1052 return true;
// Returns true if this instruction carries a volatile flag.
bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      // mem intrinsics (memcpy/memmove/memset) expose it directly.
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      // The matrix load/store intrinsics encode volatility as a constant i1
      // argument (operand 2 for loads, operand 3 for stores).
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}
// Returns the type of the value this instruction loads from or stores to
// memory, or nullptr if it is not a recognized memory access.
Type *Instruction::getAccessType() const {
  switch (getOpcode()) {
  case Instruction::Store:
    return cast<StoreInst>(this)->getValueOperand()->getType();
  case Instruction::Load:
  case Instruction::AtomicRMW:
    // The accessed type is the instruction's result type.
    return getType();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->getNewValOperand()->getType();
  case Instruction::Call:
  case Instruction::Invoke:
    // Recognized memory intrinsics: load-like ones access their result type,
    // store-like ones access the type of the stored value (operand 0).
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(this)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::masked_load:
      case Intrinsic::masked_gather:
      case Intrinsic::masked_expandload:
      case Intrinsic::vp_load:
      case Intrinsic::vp_gather:
      case Intrinsic::experimental_vp_strided_load:
        return II->getType();
      case Intrinsic::masked_store:
      case Intrinsic::masked_scatter:
      case Intrinsic::masked_compressstore:
      case Intrinsic::vp_store:
      case Intrinsic::vp_scatter:
      case Intrinsic::experimental_vp_strided_store:
        return II->getOperand(0)->getType();
      default:
        break;
      }
    }
  }

  return nullptr;
}
// Returns true if an exception raised under this landingpad can continue
// unwinding past the enclosing frame.  IncludePhaseOneUnwind additionally
// accounts for the search phase of two-phase unwinding, which skips
// cleanup-only pads.
static bool canUnwindPastLandingPad(const LandingPadInst *LP,
                                    bool IncludePhaseOneUnwind) {
  // Because phase one unwinding skips cleanup landingpads, we effectively
  // unwind past this frame, and callers need to have valid unwind info.
  if (LP->isCleanup())
    return IncludePhaseOneUnwind;

  for (unsigned I = 0; I < LP->getNumClauses(); ++I) {
    Constant *Clause = LP->getClause(I);
    // catch ptr null catches all exceptions.
    if (LP->isCatch(I) && isa<ConstantPointerNull>(Clause))
      return false;
    // filter [0 x ptr] catches all exceptions.
    if (LP->isFilter(I) && Clause->getType()->getArrayNumElements() == 0)
      return false;
  }

  // May catch only some subset of exceptions, in which case other exceptions
  // will continue unwinding.
  return true;
}
// Returns true if this instruction may raise an exception that propagates to
// its caller (optionally counting phase-one unwinding through cleanup pads).
bool Instruction::mayThrow(bool IncludePhaseOneUnwind) const {
  switch (getOpcode()) {
  case Instruction::Call:
    return !cast<CallInst>(this)->doesNotThrow();
  case Instruction::CleanupRet:
    // Unwinds to caller when it has no unwind destination inside the function.
    return cast<CleanupReturnInst>(this)->unwindsToCaller();
  case Instruction::CatchSwitch:
    return cast<CatchSwitchInst>(this)->unwindsToCaller();
  case Instruction::Resume:
    return true;
  case Instruction::Invoke: {
    // Landingpads themselves don't unwind -- however, an invoke of a skipped
    // landingpad may continue unwinding.
    BasicBlock *UnwindDest = cast<InvokeInst>(this)->getUnwindDest();
    Instruction *Pad = UnwindDest->getFirstNonPHI();
    if (auto *LP = dyn_cast<LandingPadInst>(Pad))
      return canUnwindPastLandingPad(LP, IncludePhaseOneUnwind);
    return false;
  }
  case Instruction::CleanupPad:
    // Treat the same as cleanup landingpad.
    return IncludePhaseOneUnwind;
  default:
    return false;
  }
}
1171 bool Instruction::mayHaveSideEffects() const {
1172 return mayWriteToMemory() || mayThrow() || !willReturn();
1175 bool Instruction::isSafeToRemove() const {
1176 return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
1177 !this->isTerminator() && !this->isEHPad();
1180 bool Instruction::willReturn() const {
1181 // Volatile store isn't guaranteed to return; see LangRef.
1182 if (auto *SI = dyn_cast<StoreInst>(this))
1183 return !SI->isVolatile();
1185 if (const auto *CB = dyn_cast<CallBase>(this))
1186 return CB->hasFnAttr(Attribute::WillReturn);
1187 return true;
1190 bool Instruction::isLifetimeStartOrEnd() const {
1191 auto *II = dyn_cast<IntrinsicInst>(this);
1192 if (!II)
1193 return false;
1194 Intrinsic::ID ID = II->getIntrinsicID();
1195 return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
1198 bool Instruction::isLaunderOrStripInvariantGroup() const {
1199 auto *II = dyn_cast<IntrinsicInst>(this);
1200 if (!II)
1201 return false;
1202 Intrinsic::ID ID = II->getIntrinsicID();
1203 return ID == Intrinsic::launder_invariant_group ||
1204 ID == Intrinsic::strip_invariant_group;
1207 bool Instruction::isDebugOrPseudoInst() const {
1208 return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
1211 const Instruction *
1212 Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
1213 for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
1214 if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
1215 return I;
1216 return nullptr;
1219 const Instruction *
1220 Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
1221 for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
1222 if (!isa<DbgInfoIntrinsic>(I) &&
1223 !(SkipPseudoOp && isa<PseudoProbeInst>(I)) &&
1224 !(isa<IntrinsicInst>(I) &&
1225 cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fake_use))
1226 return I;
1227 return nullptr;
1230 const DebugLoc &Instruction::getStableDebugLoc() const {
1231 if (isa<DbgInfoIntrinsic>(this))
1232 if (const Instruction *Next = getNextNonDebugInstruction())
1233 return Next->getDebugLoc();
1234 return getDebugLoc();
// Returns true if this instruction's operation is associative.
bool Instruction::isAssociative() const {
  // Intrinsic calls report associativity themselves.
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isAssociative();
  unsigned Opcode = getOpcode();
  // Opcodes that are unconditionally associative (static overload).
  if (isAssociative(Opcode))
    return true;

  // FP add/mul are associative only when reassociation is allowed and signed
  // zeros can be ignored.
  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}
1254 bool Instruction::isCommutative() const {
1255 if (auto *II = dyn_cast<IntrinsicInst>(this))
1256 return II->isCommutative();
1257 // TODO: Should allow icmp/fcmp?
1258 return isCommutative(getOpcode());
// Returns the number of successor blocks of this terminator, dispatching to
// the concrete terminator subclass via Instruction.def.
unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
  // Expands to one case per terminator opcode.
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
// Returns successor `idx` of this terminator, dispatching to the concrete
// terminator subclass via Instruction.def.
BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
  // Expands to one case per terminator opcode.
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
// Replaces successor `idx` of this terminator with B, dispatching to the
// concrete terminator subclass via Instruction.def.
void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
  // Expands to one case per terminator opcode.
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
1297 void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
1298 for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
1299 Idx != NumSuccessors; ++Idx)
1300 if (getSuccessor(Idx) == OldBB)
1301 setSuccessor(Idx, NewBB);
// Base implementation of cloneImpl(); every concrete subclass must override
// it, so reaching this body is a programming error.
Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
// Swaps the two weights of this instruction's !prof branch_weights metadata
// (e.g. after inverting a branch condition).  A no-op unless the metadata
// holds exactly two weights.
void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getBranchWeightMDNode(*this);
  if (!ProfileData)
    return;
  // Index of the first weight operand, past the tag and any leading fields.
  unsigned FirstIdx = getBranchWeightOffset(ProfileData);
  // Only the two-weight (true/false successor) form is handled.
  if (ProfileData->getNumOperands() != 2 + FirstIdx)
    return;

  unsigned SecondIdx = FirstIdx + 1;
  SmallVector<Metadata *, 4> Ops;
  // If there are more weights past the second, we can't swap them
  if (ProfileData->getNumOperands() > SecondIdx + 1)
    return;
  // Copy the leading non-weight operands unchanged.
  for (unsigned Idx = 0; Idx < FirstIdx; ++Idx) {
    Ops.push_back(ProfileData->getOperand(Idx));
  }
  // Switch the order of the weights
  Ops.push_back(ProfileData->getOperand(SecondIdx));
  Ops.push_back(ProfileData->getOperand(FirstIdx));
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}
// Copies metadata from SrcInst onto this instruction.  WL is a whitelist of
// metadata kind IDs to copy; an empty WL means "copy everything".
void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  // Set-based membership test for the whitelist.
  SmallDenseSet<unsigned, 4> WLS(WL.begin(), WL.end());

  // Otherwise, enumerate and copy over metadata from the old instruction to the
  // new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }

  // The debug location is stored separately from other metadata, so handle
  // it explicitly (gated on MD_dbg when a whitelist is given).
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}
// Creates an unparented copy of this instruction by dispatching to the
// concrete subclass's cloneImpl(), then copies the optional flags and all
// metadata (including the debug location) onto the clone.
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
  // Expands to one case per instruction opcode.
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}