Run DCE after a LoopFlatten test to reduce spurious output [nfc]
[llvm-project.git] / llvm / lib / IR / IntrinsicInst.cpp
blob20ae08dd1283000f5db44481f631dd460e1bb2df
1 //===-- IntrinsicInst.cpp - Intrinsic Instruction Wrappers ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements methods that make it really easy to deal with intrinsic
10 // functions.
12 // All intrinsic function calls are instances of the call instruction, so these
13 // are all subclasses of the CallInst class. Note that none of these classes
14 // has state or virtual methods, which is an important part of this gross/neat
15 // hack working.
17 // In some cases, arguments to intrinsics need to be generic and are defined as
18 // type pointer to empty struct { }*. To access the real item of interest the
19 // cast instruction needs to be stripped away.
21 //===----------------------------------------------------------------------===//
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/ADT/StringSwitch.h"
25 #include "llvm/IR/Constants.h"
26 #include "llvm/IR/DebugInfoMetadata.h"
27 #include "llvm/IR/Metadata.h"
28 #include "llvm/IR/Module.h"
29 #include "llvm/IR/Operator.h"
30 #include "llvm/IR/PatternMatch.h"
31 #include "llvm/IR/Statepoint.h"
32 #include <optional>
34 using namespace llvm;
bool IntrinsicInst::mayLowerToFunctionCall(Intrinsic::ID IID) {
  // These ObjC runtime intrinsics may be turned into real runtime function
  // calls during lowering, so callers must treat them as potential calls.
  switch (IID) {
  case Intrinsic::objc_autorelease:
  case Intrinsic::objc_autoreleasePoolPop:
  case Intrinsic::objc_autoreleasePoolPush:
  case Intrinsic::objc_autoreleaseReturnValue:
  case Intrinsic::objc_copyWeak:
  case Intrinsic::objc_destroyWeak:
  case Intrinsic::objc_initWeak:
  case Intrinsic::objc_loadWeak:
  case Intrinsic::objc_loadWeakRetained:
  case Intrinsic::objc_moveWeak:
  case Intrinsic::objc_release:
  case Intrinsic::objc_retain:
  case Intrinsic::objc_retainAutorelease:
  case Intrinsic::objc_retainAutoreleaseReturnValue:
  case Intrinsic::objc_retainAutoreleasedReturnValue:
  case Intrinsic::objc_retainBlock:
  case Intrinsic::objc_storeStrong:
  case Intrinsic::objc_storeWeak:
  case Intrinsic::objc_unsafeClaimAutoreleasedReturnValue:
  case Intrinsic::objc_retainedObject:
  case Intrinsic::objc_unretainedObject:
  case Intrinsic::objc_unretainedPointer:
  case Intrinsic::objc_retain_autorelease:
  case Intrinsic::objc_sync_enter:
  case Intrinsic::objc_sync_exit:
    return true;
  default:
    return false;
  }
}
69 //===----------------------------------------------------------------------===//
70 /// DbgVariableIntrinsic - This is the common base class for debug info
71 /// intrinsics for variables.
72 ///
74 iterator_range<location_op_iterator> RawLocationWrapper::location_ops() const {
75 Metadata *MD = getRawLocation();
76 assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
77 // If operand is ValueAsMetadata, return a range over just that operand.
78 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
79 return {location_op_iterator(VAM), location_op_iterator(VAM + 1)};
81 // If operand is DIArgList, return a range over its args.
82 if (auto *AL = dyn_cast<DIArgList>(MD))
83 return {location_op_iterator(AL->args_begin()),
84 location_op_iterator(AL->args_end())};
85 // Operand must be an empty metadata tuple, so return empty iterator.
86 return {location_op_iterator(static_cast<ValueAsMetadata *>(nullptr)),
87 location_op_iterator(static_cast<ValueAsMetadata *>(nullptr))};
90 iterator_range<location_op_iterator>
91 DbgVariableIntrinsic::location_ops() const {
92 return getWrappedLocation().location_ops();
95 Value *DbgVariableIntrinsic::getVariableLocationOp(unsigned OpIdx) const {
96 return getWrappedLocation().getVariableLocationOp(OpIdx);
99 Value *RawLocationWrapper::getVariableLocationOp(unsigned OpIdx) const {
100 Metadata *MD = getRawLocation();
101 assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
102 if (auto *AL = dyn_cast<DIArgList>(MD))
103 return AL->getArgs()[OpIdx]->getValue();
104 if (isa<MDNode>(MD))
105 return nullptr;
106 assert(
107 isa<ValueAsMetadata>(MD) &&
108 "Attempted to get location operand from DbgVariableIntrinsic with none.");
109 auto *V = cast<ValueAsMetadata>(MD);
110 assert(OpIdx == 0 && "Operand Index must be 0 for a debug intrinsic with a "
111 "single location operand.");
112 return V->getValue();
115 static ValueAsMetadata *getAsMetadata(Value *V) {
116 return isa<MetadataAsValue>(V) ? dyn_cast<ValueAsMetadata>(
117 cast<MetadataAsValue>(V)->getMetadata())
118 : ValueAsMetadata::get(V);
void DbgVariableIntrinsic::replaceVariableLocationOp(Value *OldValue,
                                                     Value *NewValue) {
  // Replace every use of OldValue among this intrinsic's location operands
  // (and, for dbg.assign, its address operand) with NewValue.
  // If OldValue is used as the address part of a dbg.assign intrinsic replace
  // it with NewValue and return true.
  auto ReplaceDbgAssignAddress = [this, OldValue, NewValue]() -> bool {
    auto *DAI = dyn_cast<DbgAssignIntrinsic>(this);
    if (!DAI || OldValue != DAI->getAddress())
      return false;
    DAI->setAddress(NewValue);
    return true;
  };
  bool DbgAssignAddrReplaced = ReplaceDbgAssignAddress();
  (void)DbgAssignAddrReplaced; // Only inspected by the assert below.

  assert(NewValue && "Values must be non-null");
  auto Locations = location_ops();
  auto OldIt = find(Locations, OldValue);
  if (OldIt == Locations.end()) {
    // OldValue not among the location operands: it must have been the
    // dbg.assign address (handled above), otherwise the caller made an error.
    assert(DbgAssignAddrReplaced &&
           "OldValue must be dbg.assign addr if unused in DIArgList");
    return;
  }

  assert(OldIt != Locations.end() && "OldValue must be a current location");
  if (!hasArgList()) {
    // Single-location form: wrap NewValue (unless already wrapped metadata)
    // and store it directly as operand 0.
    Value *NewOperand = isa<MetadataAsValue>(NewValue)
                            ? NewValue
                            : MetadataAsValue::get(
                                  getContext(), ValueAsMetadata::get(NewValue));
    return setArgOperand(0, NewOperand);
  }
  // DIArgList form: rebuild the list, substituting NewValue for every
  // occurrence of OldValue.
  SmallVector<ValueAsMetadata *, 4> MDs;
  ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
  for (auto *VMD : Locations)
    MDs.push_back(VMD == *OldIt ? NewOperand : getAsMetadata(VMD));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}
159 void DbgVariableIntrinsic::replaceVariableLocationOp(unsigned OpIdx,
160 Value *NewValue) {
161 assert(OpIdx < getNumVariableLocationOps() && "Invalid Operand Index");
162 if (!hasArgList()) {
163 Value *NewOperand = isa<MetadataAsValue>(NewValue)
164 ? NewValue
165 : MetadataAsValue::get(
166 getContext(), ValueAsMetadata::get(NewValue));
167 return setArgOperand(0, NewOperand);
169 SmallVector<ValueAsMetadata *, 4> MDs;
170 ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
171 for (unsigned Idx = 0; Idx < getNumVariableLocationOps(); ++Idx)
172 MDs.push_back(Idx == OpIdx ? NewOperand
173 : getAsMetadata(getVariableLocationOp(Idx)));
174 setArgOperand(
175 0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
void DbgVariableIntrinsic::addVariableLocationOps(ArrayRef<Value *> NewValues,
                                                  DIExpression *NewExpr) {
  // Append NewValues to the location operand list and install NewExpr, which
  // must reference every (old + new) location operand.
  assert(NewExpr->hasAllLocationOps(getNumVariableLocationOps() +
                                    NewValues.size()) &&
         "NewExpr for debug variable intrinsic does not reference every "
         "location operand.");
  assert(!is_contained(NewValues, nullptr) && "New values must be non-null");
  // Install the new expression, then rebuild the DIArgList as the existing
  // operands followed by the appended ones.
  setArgOperand(2, MetadataAsValue::get(getContext(), NewExpr));
  SmallVector<ValueAsMetadata *, 4> MDs;
  for (auto *VMD : location_ops())
    MDs.push_back(getAsMetadata(VMD));
  for (auto *VMD : NewValues)
    MDs.push_back(getAsMetadata(VMD));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}
195 std::optional<uint64_t> DbgVariableIntrinsic::getFragmentSizeInBits() const {
196 if (auto Fragment = getExpression()->getFragmentInfo())
197 return Fragment->SizeInBits;
198 return getVariable()->getSizeInBits();
201 Value *DbgAssignIntrinsic::getAddress() const {
202 auto *MD = getRawAddress();
203 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
204 return V->getValue();
206 // When the value goes to null, it gets replaced by an empty MDNode.
207 assert(!cast<MDNode>(MD)->getNumOperands() && "Expected an empty MDNode");
208 return nullptr;
211 void DbgAssignIntrinsic::setAssignId(DIAssignID *New) {
212 setOperand(OpAssignID, MetadataAsValue::get(getContext(), New));
215 void DbgAssignIntrinsic::setAddress(Value *V) {
216 setOperand(OpAddress,
217 MetadataAsValue::get(getContext(), ValueAsMetadata::get(V)));
220 void DbgAssignIntrinsic::setKillAddress() {
221 if (isKillAddress())
222 return;
223 setAddress(UndefValue::get(getAddress()->getType()));
226 bool DbgAssignIntrinsic::isKillAddress() const {
227 Value *Addr = getAddress();
228 return !Addr || isa<UndefValue>(Addr);
231 void DbgAssignIntrinsic::setValue(Value *V) {
232 setOperand(OpValue,
233 MetadataAsValue::get(getContext(), ValueAsMetadata::get(V)));
int llvm::Intrinsic::lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
                                               StringRef Name) {
  // Return the index of the intrinsic in NameTable whose name is the longest
  // dotted prefix of Name (or equals Name), or -1 if none matches.
  assert(Name.startswith("llvm."));

  // Do successive binary searches of the dotted name components. For
  // "llvm.gc.experimental.statepoint.p1i8.p1i32", we will find the range of
  // intrinsics starting with "llvm.gc", then "llvm.gc.experimental", then
  // "llvm.gc.experimental.statepoint", and then we will stop as the range is
  // size 1. During the search, we can skip the prefix that we already know is
  // identical. By using strncmp we consider names with differing suffixes to
  // be part of the equal range.
  size_t CmpEnd = 4; // Skip the "llvm" component.
  const char *const *Low = NameTable.begin();
  const char *const *High = NameTable.end();
  const char *const *LastLow = Low;
  while (CmpEnd < Name.size() && High - Low > 0) {
    size_t CmpStart = CmpEnd;
    CmpEnd = Name.find('.', CmpStart + 1);
    CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
    auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
      // Only compare the current component; earlier components are known
      // equal for everything in [Low, High).
      return strncmp(LHS + CmpStart, RHS + CmpStart, CmpEnd - CmpStart) < 0;
    };
    LastLow = Low;
    std::tie(Low, High) = std::equal_range(Low, High, Name.data(), Cmp);
  }
  if (High - Low > 0)
    LastLow = Low;

  if (LastLow == NameTable.end())
    return -1;
  StringRef NameFound = *LastLow;
  // Accept an exact match, or a table entry that is a dotted prefix of Name
  // (covers overloaded intrinsics with type suffixes).
  if (Name == NameFound ||
      (Name.startswith(NameFound) && Name[NameFound.size()] == '.'))
    return LastLow - NameTable.begin();
  return -1;
}
273 ConstantInt *InstrProfCntrInstBase::getNumCounters() const {
274 if (InstrProfValueProfileInst::classof(this))
275 llvm_unreachable("InstrProfValueProfileInst does not have counters!");
276 return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2)));
279 ConstantInt *InstrProfCntrInstBase::getIndex() const {
280 if (InstrProfValueProfileInst::classof(this))
281 llvm_unreachable("Please use InstrProfValueProfileInst::getIndex()");
282 return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
285 Value *InstrProfIncrementInst::getStep() const {
286 if (InstrProfIncrementInstStep::classof(this)) {
287 return const_cast<Value *>(getArgOperand(4));
289 const Module *M = getModule();
290 LLVMContext &Context = M->getContext();
291 return ConstantInt::get(Type::getInt64Ty(Context), 1);
294 std::optional<RoundingMode> ConstrainedFPIntrinsic::getRoundingMode() const {
295 unsigned NumOperands = arg_size();
296 Metadata *MD = nullptr;
297 auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2));
298 if (MAV)
299 MD = MAV->getMetadata();
300 if (!MD || !isa<MDString>(MD))
301 return std::nullopt;
302 return convertStrToRoundingMode(cast<MDString>(MD)->getString());
305 std::optional<fp::ExceptionBehavior>
306 ConstrainedFPIntrinsic::getExceptionBehavior() const {
307 unsigned NumOperands = arg_size();
308 Metadata *MD = nullptr;
309 auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1));
310 if (MAV)
311 MD = MAV->getMetadata();
312 if (!MD || !isa<MDString>(MD))
313 return std::nullopt;
314 return convertStrToExceptionBehavior(cast<MDString>(MD)->getString());
317 bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const {
318 std::optional<fp::ExceptionBehavior> Except = getExceptionBehavior();
319 if (Except) {
320 if (*Except != fp::ebIgnore)
321 return false;
324 std::optional<RoundingMode> Rounding = getRoundingMode();
325 if (Rounding) {
326 if (*Rounding != RoundingMode::NearestTiesToEven)
327 return false;
330 return true;
// Decode an fcmp predicate that was encoded as an MDString metadata operand
// (e.g. "oeq"). Returns BAD_FCMP_PREDICATE for a missing or unknown string.
static FCmpInst::Predicate getFPPredicateFromMD(const Value *Op) {
  Metadata *MD = cast<MetadataAsValue>(Op)->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return FCmpInst::BAD_FCMP_PREDICATE;
  return StringSwitch<FCmpInst::Predicate>(cast<MDString>(MD)->getString())
      .Case("oeq", FCmpInst::FCMP_OEQ)
      .Case("ogt", FCmpInst::FCMP_OGT)
      .Case("oge", FCmpInst::FCMP_OGE)
      .Case("olt", FCmpInst::FCMP_OLT)
      .Case("ole", FCmpInst::FCMP_OLE)
      .Case("one", FCmpInst::FCMP_ONE)
      .Case("ord", FCmpInst::FCMP_ORD)
      .Case("uno", FCmpInst::FCMP_UNO)
      .Case("ueq", FCmpInst::FCMP_UEQ)
      .Case("ugt", FCmpInst::FCMP_UGT)
      .Case("uge", FCmpInst::FCMP_UGE)
      .Case("ult", FCmpInst::FCMP_ULT)
      .Case("ule", FCmpInst::FCMP_ULE)
      .Case("une", FCmpInst::FCMP_UNE)
      .Default(FCmpInst::BAD_FCMP_PREDICATE);
}
355 FCmpInst::Predicate ConstrainedFPCmpIntrinsic::getPredicate() const {
356 return getFPPredicateFromMD(getArgOperand(2));
bool ConstrainedFPIntrinsic::isUnaryOp() const {
  // Table-generated from ConstrainedOps.def: true iff the operation takes
  // exactly one (non-metadata) argument.
  switch (getIntrinsicID()) {
  default:
    return false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 1;
#include "llvm/IR/ConstrainedOps.def"
  }
}
bool ConstrainedFPIntrinsic::isTernaryOp() const {
  // Table-generated from ConstrainedOps.def: true iff the operation takes
  // exactly three (non-metadata) arguments.
  switch (getIntrinsicID()) {
  default:
    return false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 3;
#include "llvm/IR/ConstrainedOps.def"
  }
}
bool ConstrainedFPIntrinsic::classof(const IntrinsicInst *I) {
  // An intrinsic is a constrained-FP intrinsic iff it appears in
  // ConstrainedOps.def.
  switch (I->getIntrinsicID()) {
#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return true;
  default:
    return false;
  }
}
392 ElementCount VPIntrinsic::getStaticVectorLength() const {
393 auto GetVectorLengthOfType = [](const Type *T) -> ElementCount {
394 const auto *VT = cast<VectorType>(T);
395 auto ElemCount = VT->getElementCount();
396 return ElemCount;
399 Value *VPMask = getMaskParam();
400 if (!VPMask) {
401 assert((getIntrinsicID() == Intrinsic::vp_merge ||
402 getIntrinsicID() == Intrinsic::vp_select) &&
403 "Unexpected VP intrinsic without mask operand");
404 return GetVectorLengthOfType(getType());
406 return GetVectorLengthOfType(VPMask->getType());
409 Value *VPIntrinsic::getMaskParam() const {
410 if (auto MaskPos = getMaskParamPos(getIntrinsicID()))
411 return getArgOperand(*MaskPos);
412 return nullptr;
415 void VPIntrinsic::setMaskParam(Value *NewMask) {
416 auto MaskPos = getMaskParamPos(getIntrinsicID());
417 setArgOperand(*MaskPos, NewMask);
420 Value *VPIntrinsic::getVectorLengthParam() const {
421 if (auto EVLPos = getVectorLengthParamPos(getIntrinsicID()))
422 return getArgOperand(*EVLPos);
423 return nullptr;
426 void VPIntrinsic::setVectorLengthParam(Value *NewEVL) {
427 auto EVLPos = getVectorLengthParamPos(getIntrinsicID());
428 setArgOperand(*EVLPos, NewEVL);
std::optional<unsigned>
VPIntrinsic::getMaskParamPos(Intrinsic::ID IntrinsicID) {
  // Mask operand positions are table-generated from VPIntrinsics.def;
  // non-VP intrinsics yield std::nullopt.
  switch (IntrinsicID) {
  default:
    return std::nullopt;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}
std::optional<unsigned>
VPIntrinsic::getVectorLengthParamPos(Intrinsic::ID IntrinsicID) {
  // EVL operand positions are table-generated from VPIntrinsics.def;
  // non-VP intrinsics yield std::nullopt.
  switch (IntrinsicID) {
  default:
    return std::nullopt;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return VLENPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}
457 /// \return the alignment of the pointer used by this load/store/gather or
458 /// scatter.
459 MaybeAlign VPIntrinsic::getPointerAlignment() const {
460 std::optional<unsigned> PtrParamOpt =
461 getMemoryPointerParamPos(getIntrinsicID());
462 assert(PtrParamOpt && "no pointer argument!");
463 return getParamAlign(*PtrParamOpt);
466 /// \return The pointer operand of this load,store, gather or scatter.
467 Value *VPIntrinsic::getMemoryPointerParam() const {
468 if (auto PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID()))
469 return getArgOperand(*PtrParamOpt);
470 return nullptr;
std::optional<unsigned>
VPIntrinsic::getMemoryPointerParamPos(Intrinsic::ID VPID) {
  // Pointer operand positions for memory VP intrinsics are table-generated
  // from VPIntrinsics.def; non-memory intrinsics fall through to nullopt.
  switch (VPID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_MEMOP(POINTERPOS, ...) return POINTERPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return std::nullopt;
}
486 /// \return The data (payload) operand of this store or scatter.
487 Value *VPIntrinsic::getMemoryDataParam() const {
488 auto DataParamOpt = getMemoryDataParamPos(getIntrinsicID());
489 if (!DataParamOpt)
490 return nullptr;
491 return getArgOperand(*DataParamOpt);
std::optional<unsigned> VPIntrinsic::getMemoryDataParamPos(Intrinsic::ID VPID) {
  // Data operand positions for store/scatter VP intrinsics are table-generated
  // from VPIntrinsics.def; everything else falls through to nullopt.
  switch (VPID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_MEMOP(POINTERPOS, DATAPOS) return DATAPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return std::nullopt;
}
// File-local constexpr helper so the table can also be consulted at compile
// time (see the static_asserts below).
constexpr bool isVPIntrinsic(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return true;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}
bool VPIntrinsic::isVPIntrinsic(Intrinsic::ID ID) {
  // Thin wrapper over the file-local constexpr table lookup.
  return ::isVPIntrinsic(ID);
}
// Equivalent non-predicated opcode
// (constexpr so the static_asserts below can evaluate it at compile time).
constexpr static std::optional<unsigned>
getFunctionalOpcodeForVP(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC) return Instruction::OPC;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return std::nullopt;
}
std::optional<unsigned>
VPIntrinsic::getFunctionalOpcodeForVP(Intrinsic::ID ID) {
  // Thin wrapper over the file-local constexpr table lookup.
  return ::getFunctionalOpcodeForVP(ID);
}
// Equivalent non-predicated intrinsic ID
// (constexpr so the static_asserts below can evaluate it at compile time).
constexpr static std::optional<Intrinsic::ID>
getFunctionalIntrinsicIDForVP(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_FUNCTIONAL_INTRINSIC(INTRIN) return Intrinsic::INTRIN;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return std::nullopt;
}
std::optional<Intrinsic::ID>
VPIntrinsic::getFunctionalIntrinsicIDForVP(Intrinsic::ID ID) {
  // Thin wrapper over the file-local constexpr table lookup.
  return ::getFunctionalIntrinsicIDForVP(ID);
}
// True for VP intrinsics explicitly marked (via VP_PROPERTY_NO_FUNCTIONAL)
// as having no non-predicated opcode/intrinsic equivalent.
constexpr static bool doesVPHaveNoFunctionalEquivalent(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_NO_FUNCTIONAL return true;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}
// All VP intrinsics should have an equivalent non-VP opcode or intrinsic
// defined, or be marked that they don't have one. Enforced at compile time by
// expanding one static_assert per VP intrinsic in VPIntrinsics.def.
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...)                                 \
  static_assert(doesVPHaveNoFunctionalEquivalent(Intrinsic::VPID) ||           \
                getFunctionalOpcodeForVP(Intrinsic::VPID) ||                   \
                getFunctionalIntrinsicIDForVP(Intrinsic::VPID));
#include "llvm/IR/VPIntrinsics.def"
// Equivalent non-predicated constrained intrinsic
std::optional<Intrinsic::ID>
VPIntrinsic::getConstrainedIntrinsicIDForVP(Intrinsic::ID ID) {
  // Table-generated from VPIntrinsics.def; only VP intrinsics with a
  // VP_PROPERTY_CONSTRAINEDFP entry have a constrained counterpart.
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_CONSTRAINEDFP(HASRND, HASEXCEPT, CID) return Intrinsic::CID;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return std::nullopt;
}
Intrinsic::ID VPIntrinsic::getForOpcode(unsigned IROPC) {
  // Inverse mapping of getFunctionalOpcodeForVP: IR opcode -> VP intrinsic.
  // Returns not_intrinsic when no VP intrinsic declares this opcode.
  switch (IROPC) {
  default:
    break;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) break;
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC) case Instruction::OPC:
#define END_REGISTER_VP_INTRINSIC(VPID) return Intrinsic::VPID;
#include "llvm/IR/VPIntrinsics.def"
  }
  return Intrinsic::not_intrinsic;
}
bool VPIntrinsic::canIgnoreVectorLengthParam() const {
  using namespace PatternMatch;

  ElementCount EC = getStaticVectorLength();

  // No vlen param - no lanes masked-off by it.
  auto *VLParam = getVectorLengthParam();
  if (!VLParam)
    return true;

  // Note that the VP intrinsic causes undefined behavior if the Explicit Vector
  // Length parameter is strictly greater-than the number of vector elements of
  // the operation. This function returns true when this is detected statically
  // in the IR.

  // Check whether "W == vscale * EC.getKnownMinValue()"
  if (EC.isScalable()) {
    // Compare vscale patterns
    uint64_t VScaleFactor;
    if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale())))
      return VScaleFactor >= EC.getKnownMinValue();
    return (EC.getKnownMinValue() == 1) && match(VLParam, m_VScale());
  }

  // standard SIMD operation: the EVL covers all lanes iff it is a constant at
  // least as large as the fixed element count.
  const auto *VLConst = dyn_cast<ConstantInt>(VLParam);
  if (!VLConst)
    return false;

  uint64_t VLNum = VLConst->getZExtValue();
  if (VLNum >= EC.getKnownMinValue())
    return true;

  return false;
}
Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID,
                                               Type *ReturnType,
                                               ArrayRef<Value *> Params) {
  // Declare (or retrieve) the VP intrinsic in module M with the overload
  // types implied by ReturnType and the concrete Params.
  assert(isVPIntrinsic(VPID) && "not a VP intrinsic");
  Function *VPFunc;
  switch (VPID) {
  default: {
    // Generic case: overload on the first parameter's type, except for
    // reductions, which overload on the vector operand's type.
    Type *OverloadTy = Params[0]->getType();
    if (VPReductionIntrinsic::isVPReduction(VPID))
      OverloadTy =
          Params[*VPReductionIntrinsic::getVectorParamPos(VPID)]->getType();

    VPFunc = Intrinsic::getDeclaration(M, VPID, OverloadTy);
    break;
  }
  // Casts overload on both the destination and source vector types.
  case Intrinsic::vp_trunc:
  case Intrinsic::vp_sext:
  case Intrinsic::vp_zext:
  case Intrinsic::vp_fptoui:
  case Intrinsic::vp_fptosi:
  case Intrinsic::vp_uitofp:
  case Intrinsic::vp_sitofp:
  case Intrinsic::vp_fptrunc:
  case Intrinsic::vp_fpext:
  case Intrinsic::vp_ptrtoint:
  case Intrinsic::vp_inttoptr:
    VPFunc =
        Intrinsic::getDeclaration(M, VPID, {ReturnType, Params[0]->getType()});
    break;
  case Intrinsic::vp_is_fpclass:
    VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[0]->getType()});
    break;
  // vp.merge/vp.select overload on the true/false operand type.
  case Intrinsic::vp_merge:
  case Intrinsic::vp_select:
    VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[1]->getType()});
    break;
  // Memory intrinsics overload on the value type plus the pointer (and, for
  // strided accesses, the stride) types.
  case Intrinsic::vp_load:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {ReturnType, Params[0]->getType()});
    break;
  case Intrinsic::experimental_vp_strided_load:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {ReturnType, Params[0]->getType(), Params[1]->getType()});
    break;
  case Intrinsic::vp_gather:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {ReturnType, Params[0]->getType()});
    break;
  case Intrinsic::vp_store:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {Params[0]->getType(), Params[1]->getType()});
    break;
  case Intrinsic::experimental_vp_strided_store:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID,
        {Params[0]->getType(), Params[1]->getType(), Params[2]->getType()});
    break;
  case Intrinsic::vp_scatter:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {Params[0]->getType(), Params[1]->getType()});
    break;
  }
  assert(VPFunc && "Could not declare VP intrinsic");
  return VPFunc;
}
bool VPReductionIntrinsic::isVPReduction(Intrinsic::ID ID) {
  // True iff the intrinsic has a VP_PROPERTY_REDUCTION entry in
  // VPIntrinsics.def.
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, ...) return true;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}
bool VPCastIntrinsic::isVPCast(Intrinsic::ID ID) {
  // True iff the intrinsic has a VP_PROPERTY_CASTOP entry in VPIntrinsics.def.
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_CASTOP return true;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}
bool VPCmpIntrinsic::isVPCmp(Intrinsic::ID ID) {
  // True iff the intrinsic has a VP_PROPERTY_CMP entry in VPIntrinsics.def.
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_CMP(CCPOS, ...) return true;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}
bool VPBinOpIntrinsic::isVPBinOp(Intrinsic::ID ID) {
  // True iff the intrinsic has a VP_PROPERTY_BINARYOP entry in
  // VPIntrinsics.def.
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_BINARYOP return true;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}
// Decode an icmp predicate that was encoded as an MDString metadata operand
// (e.g. "slt"). Returns BAD_ICMP_PREDICATE for a missing or unknown string.
static ICmpInst::Predicate getIntPredicateFromMD(const Value *Op) {
  Metadata *MD = cast<MetadataAsValue>(Op)->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return ICmpInst::BAD_ICMP_PREDICATE;
  return StringSwitch<ICmpInst::Predicate>(cast<MDString>(MD)->getString())
      .Case("eq", ICmpInst::ICMP_EQ)
      .Case("ne", ICmpInst::ICMP_NE)
      .Case("ugt", ICmpInst::ICMP_UGT)
      .Case("uge", ICmpInst::ICMP_UGE)
      .Case("ult", ICmpInst::ICMP_ULT)
      .Case("ule", ICmpInst::ICMP_ULE)
      .Case("sgt", ICmpInst::ICMP_SGT)
      .Case("sge", ICmpInst::ICMP_SGE)
      .Case("slt", ICmpInst::ICMP_SLT)
      .Case("sle", ICmpInst::ICMP_SLE)
      .Default(ICmpInst::BAD_ICMP_PREDICATE);
}
CmpInst::Predicate VPCmpIntrinsic::getPredicate() const {
  // Locate the condition-code operand (position and FP-ness are
  // table-generated from VPIntrinsics.def), then decode its MDString.
  bool IsFP = true;
  std::optional<unsigned> CCArgIdx;
  switch (getIntrinsicID()) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_CMP(CCPOS, ISFP)                                           \
  CCArgIdx = CCPOS;                                                            \
  IsFP = ISFP;                                                                 \
  break;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  assert(CCArgIdx && "Unexpected vector-predicated comparison");
  return IsFP ? getFPPredicateFromMD(getArgOperand(*CCArgIdx))
              : getIntPredicateFromMD(getArgOperand(*CCArgIdx));
}
unsigned VPReductionIntrinsic::getVectorParamPos() const {
  // Safe to dereference: every VPReductionIntrinsic has a vector operand.
  return *VPReductionIntrinsic::getVectorParamPos(getIntrinsicID());
}
unsigned VPReductionIntrinsic::getStartParamPos() const {
  // Safe to dereference: every VPReductionIntrinsic has a start operand.
  return *VPReductionIntrinsic::getStartParamPos(getIntrinsicID());
}
std::optional<unsigned>
VPReductionIntrinsic::getVectorParamPos(Intrinsic::ID ID) {
  // Vector operand positions for VP reductions are table-generated from
  // VPIntrinsics.def; non-reductions yield std::nullopt.
  switch (ID) {
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS) return VECTORPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  default:
    break;
  }
  return std::nullopt;
}
std::optional<unsigned>
VPReductionIntrinsic::getStartParamPos(Intrinsic::ID ID) {
  // Start-value operand positions for VP reductions are table-generated from
  // VPIntrinsics.def; non-reductions yield std::nullopt.
  switch (ID) {
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS) return STARTPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  default:
    break;
  }
  return std::nullopt;
}
Instruction::BinaryOps BinaryOpIntrinsic::getBinaryOp() const {
  // Map the overflow/saturating intrinsic to the IR binary opcode it models.
  switch (getIntrinsicID()) {
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
    return Instruction::Add;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat:
    return Instruction::Sub;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    return Instruction::Mul;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
}
bool BinaryOpIntrinsic::isSigned() const {
  // True for the signed ("s"-prefixed) overflow/saturating intrinsics.
  switch (getIntrinsicID()) {
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
    return true;
  default:
    return false;
  }
}
861 unsigned BinaryOpIntrinsic::getNoWrapKind() const {
862 if (isSigned())
863 return OverflowingBinaryOperator::NoSignedWrap;
864 else
865 return OverflowingBinaryOperator::NoUnsignedWrap;
const Value *GCProjectionInst::getStatepoint() const {
  // The token may be undef if the statepoint was optimized away; propagate it.
  const Value *Token = getArgOperand(0);
  if (isa<UndefValue>(Token))
    return Token;

  // This takes care both of relocates for call statepoints and relocates
  // on normal path of invoke statepoint.
  if (!isa<LandingPadInst>(Token))
    return cast<GCStatepointInst>(Token);

  // This relocate is on exceptional path of an invoke statepoint
  const BasicBlock *InvokeBB =
      cast<Instruction>(Token)->getParent()->getUniquePredecessor();

  assert(InvokeBB && "safepoints should have unique landingpads");
  assert(InvokeBB->getTerminator() &&
         "safepoint block should be well formed");

  // The invoke's terminator (in the unique predecessor) is the statepoint.
  return cast<GCStatepointInst>(InvokeBB->getTerminator());
}
889 Value *GCRelocateInst::getBasePtr() const {
890 auto Statepoint = getStatepoint();
891 if (isa<UndefValue>(Statepoint))
892 return UndefValue::get(Statepoint->getType());
894 auto *GCInst = cast<GCStatepointInst>(Statepoint);
895 if (auto Opt = GCInst->getOperandBundle(LLVMContext::OB_gc_live))
896 return *(Opt->Inputs.begin() + getBasePtrIndex());
897 return *(GCInst->arg_begin() + getBasePtrIndex());
900 Value *GCRelocateInst::getDerivedPtr() const {
901 auto *Statepoint = getStatepoint();
902 if (isa<UndefValue>(Statepoint))
903 return UndefValue::get(Statepoint->getType());
905 auto *GCInst = cast<GCStatepointInst>(Statepoint);
906 if (auto Opt = GCInst->getOperandBundle(LLVMContext::OB_gc_live))
907 return *(Opt->Inputs.begin() + getDerivedPtrIndex());
908 return *(GCInst->arg_begin() + getDerivedPtrIndex());