1 //===- Function.cpp - Implement the Global object classes -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the Function class for the IR library.
11 //===----------------------------------------------------------------------===//
13 #include "llvm/IR/Function.h"
14 #include "SymbolTableListTraitsImpl.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/ADT/DenseSet.h"
17 #include "llvm/ADT/None.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallString.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/StringRef.h"
23 #include "llvm/IR/AbstractCallSite.h"
24 #include "llvm/IR/Argument.h"
25 #include "llvm/IR/Attributes.h"
26 #include "llvm/IR/BasicBlock.h"
27 #include "llvm/IR/Constant.h"
28 #include "llvm/IR/Constants.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/GlobalValue.h"
31 #include "llvm/IR/InstIterator.h"
32 #include "llvm/IR/Instruction.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/IntrinsicInst.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/IntrinsicsAArch64.h"
37 #include "llvm/IR/IntrinsicsAMDGPU.h"
38 #include "llvm/IR/IntrinsicsARM.h"
39 #include "llvm/IR/IntrinsicsBPF.h"
40 #include "llvm/IR/IntrinsicsHexagon.h"
41 #include "llvm/IR/IntrinsicsMips.h"
42 #include "llvm/IR/IntrinsicsNVPTX.h"
43 #include "llvm/IR/IntrinsicsPowerPC.h"
44 #include "llvm/IR/IntrinsicsR600.h"
45 #include "llvm/IR/IntrinsicsRISCV.h"
46 #include "llvm/IR/IntrinsicsS390.h"
47 #include "llvm/IR/IntrinsicsVE.h"
48 #include "llvm/IR/IntrinsicsWebAssembly.h"
49 #include "llvm/IR/IntrinsicsX86.h"
50 #include "llvm/IR/IntrinsicsXCore.h"
51 #include "llvm/IR/LLVMContext.h"
52 #include "llvm/IR/MDBuilder.h"
53 #include "llvm/IR/Metadata.h"
54 #include "llvm/IR/Module.h"
55 #include "llvm/IR/Operator.h"
56 #include "llvm/IR/SymbolTableListTraits.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/Use.h"
59 #include "llvm/IR/User.h"
60 #include "llvm/IR/Value.h"
61 #include "llvm/IR/ValueSymbolTable.h"
62 #include "llvm/Support/Casting.h"
63 #include "llvm/Support/CommandLine.h"
64 #include "llvm/Support/Compiler.h"
65 #include "llvm/Support/ErrorHandling.h"
66 #include <algorithm>
67 #include <cassert>
68 #include <cstddef>
69 #include <cstdint>
70 #include <cstring>
71 #include <string>
73 using namespace llvm;
74 using ProfileCount = Function::ProfileCount;
76 // Explicit instantiations of SymbolTableListTraits since some of the methods
77 // are not in the public header file...
78 template class llvm::SymbolTableListTraits<BasicBlock>;
80 static cl::opt<unsigned> NonGlobalValueMaxNameSize(
81 "non-global-value-max-name-size", cl::Hidden, cl::init(1024),
82 cl::desc("Maximum size for the name of non-global values."));
84 //===----------------------------------------------------------------------===//
85 // Argument Implementation
86 //===----------------------------------------------------------------------===//
88 Argument::Argument(Type *Ty, const Twine &Name, Function *Par, unsigned ArgNo)
89 : Value(Ty, Value::ArgumentVal), Parent(Par), ArgNo(ArgNo) {
90 setName(Name);
93 void Argument::setParent(Function *parent) {
94 Parent = parent;
97 bool Argument::hasNonNullAttr(bool AllowUndefOrPoison) const {
98 if (!getType()->isPointerTy()) return false;
99 if (getParent()->hasParamAttribute(getArgNo(), Attribute::NonNull) &&
100 (AllowUndefOrPoison ||
101 getParent()->hasParamAttribute(getArgNo(), Attribute::NoUndef)))
102 return true;
103 else if (getDereferenceableBytes() > 0 &&
104 !NullPointerIsDefined(getParent(),
105 getType()->getPointerAddressSpace()))
106 return true;
107 return false;
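// Example (editorial sketch, not part of upstream LLVM): given an Argument A
// of pointer type, the two flavors of the query above differ only in whether
// undef/poison values are allowed to flow through:
//
//   bool ProvablyNonNull = A.hasNonNullAttr(/*AllowUndefOrPoison=*/false);
//   bool NonNullOrPoison = A.hasNonNullAttr(/*AllowUndefOrPoison=*/true);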
110 bool Argument::hasByValAttr() const {
111 if (!getType()->isPointerTy()) return false;
112 return hasAttribute(Attribute::ByVal);
115 bool Argument::hasByRefAttr() const {
116 if (!getType()->isPointerTy())
117 return false;
118 return hasAttribute(Attribute::ByRef);
121 bool Argument::hasSwiftSelfAttr() const {
122 return getParent()->hasParamAttribute(getArgNo(), Attribute::SwiftSelf);
125 bool Argument::hasSwiftErrorAttr() const {
126 return getParent()->hasParamAttribute(getArgNo(), Attribute::SwiftError);
129 bool Argument::hasInAllocaAttr() const {
130 if (!getType()->isPointerTy()) return false;
131 return hasAttribute(Attribute::InAlloca);
134 bool Argument::hasPreallocatedAttr() const {
135 if (!getType()->isPointerTy())
136 return false;
137 return hasAttribute(Attribute::Preallocated);
140 bool Argument::hasPassPointeeByValueCopyAttr() const {
141 if (!getType()->isPointerTy()) return false;
142 AttributeList Attrs = getParent()->getAttributes();
143 return Attrs.hasParamAttr(getArgNo(), Attribute::ByVal) ||
144 Attrs.hasParamAttr(getArgNo(), Attribute::InAlloca) ||
145 Attrs.hasParamAttr(getArgNo(), Attribute::Preallocated);
148 bool Argument::hasPointeeInMemoryValueAttr() const {
149 if (!getType()->isPointerTy())
150 return false;
151 AttributeList Attrs = getParent()->getAttributes();
152 return Attrs.hasParamAttr(getArgNo(), Attribute::ByVal) ||
153 Attrs.hasParamAttr(getArgNo(), Attribute::StructRet) ||
154 Attrs.hasParamAttr(getArgNo(), Attribute::InAlloca) ||
155 Attrs.hasParamAttr(getArgNo(), Attribute::Preallocated) ||
156 Attrs.hasParamAttr(getArgNo(), Attribute::ByRef);
159 /// For a byval, byref, sret, inalloca, or preallocated parameter, get the
160 /// in-memory parameter type.
161 static Type *getMemoryParamAllocType(AttributeSet ParamAttrs, Type *ArgTy) {
162 // FIXME: All the type carrying attributes are mutually exclusive, so there
163 // should be a single query to get the stored type that handles any of them.
164 if (Type *ByValTy = ParamAttrs.getByValType())
165 return ByValTy;
166 if (Type *ByRefTy = ParamAttrs.getByRefType())
167 return ByRefTy;
168 if (Type *PreAllocTy = ParamAttrs.getPreallocatedType())
169 return PreAllocTy;
170 if (Type *InAllocaTy = ParamAttrs.getInAllocaType())
171 return InAllocaTy;
172 if (Type *SRetTy = ParamAttrs.getStructRetType())
173 return SRetTy;
175 return nullptr;
178 uint64_t Argument::getPassPointeeByValueCopySize(const DataLayout &DL) const {
179 AttributeSet ParamAttrs =
180 getParent()->getAttributes().getParamAttrs(getArgNo());
181 if (Type *MemTy = getMemoryParamAllocType(ParamAttrs, getType()))
182 return DL.getTypeAllocSize(MemTy);
183 return 0;
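// Example (editorial sketch): summing the stack-copy sizes of all
// pointee-by-value parameters of a function. `F` and `DL` are assumed to be a
// Function and its module's DataLayout.
//
//   uint64_t TotalCopyBytes = 0;
//   for (const Argument &A : F.args())
//     if (A.hasPassPointeeByValueCopyAttr())
//       TotalCopyBytes += A.getPassPointeeByValueCopySize(DL);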
186 Type *Argument::getPointeeInMemoryValueType() const {
187 AttributeSet ParamAttrs =
188 getParent()->getAttributes().getParamAttrs(getArgNo());
189 return getMemoryParamAllocType(ParamAttrs, getType());
192 unsigned Argument::getParamAlignment() const {
193 assert(getType()->isPointerTy() && "Only pointers have alignments");
194 return getParent()->getParamAlignment(getArgNo());
197 MaybeAlign Argument::getParamAlign() const {
198 assert(getType()->isPointerTy() && "Only pointers have alignments");
199 return getParent()->getParamAlign(getArgNo());
202 MaybeAlign Argument::getParamStackAlign() const {
203 return getParent()->getParamStackAlign(getArgNo());
206 Type *Argument::getParamByValType() const {
207 assert(getType()->isPointerTy() && "Only pointers have byval types");
208 return getParent()->getParamByValType(getArgNo());
211 Type *Argument::getParamStructRetType() const {
212 assert(getType()->isPointerTy() && "Only pointers have sret types");
213 return getParent()->getParamStructRetType(getArgNo());
216 Type *Argument::getParamByRefType() const {
217 assert(getType()->isPointerTy() && "Only pointers have byref types");
218 return getParent()->getParamByRefType(getArgNo());
221 Type *Argument::getParamInAllocaType() const {
222 assert(getType()->isPointerTy() && "Only pointers have inalloca types");
223 return getParent()->getParamInAllocaType(getArgNo());
226 uint64_t Argument::getDereferenceableBytes() const {
227 assert(getType()->isPointerTy() &&
228 "Only pointers have dereferenceable bytes");
229 return getParent()->getParamDereferenceableBytes(getArgNo());
232 uint64_t Argument::getDereferenceableOrNullBytes() const {
233 assert(getType()->isPointerTy() &&
234 "Only pointers have dereferenceable bytes");
235 return getParent()->getParamDereferenceableOrNullBytes(getArgNo());
238 bool Argument::hasNestAttr() const {
239 if (!getType()->isPointerTy()) return false;
240 return hasAttribute(Attribute::Nest);
243 bool Argument::hasNoAliasAttr() const {
244 if (!getType()->isPointerTy()) return false;
245 return hasAttribute(Attribute::NoAlias);
248 bool Argument::hasNoCaptureAttr() const {
249 if (!getType()->isPointerTy()) return false;
250 return hasAttribute(Attribute::NoCapture);
253 bool Argument::hasNoFreeAttr() const {
254 if (!getType()->isPointerTy()) return false;
255 return hasAttribute(Attribute::NoFree);
258 bool Argument::hasStructRetAttr() const {
259 if (!getType()->isPointerTy()) return false;
260 return hasAttribute(Attribute::StructRet);
263 bool Argument::hasInRegAttr() const {
264 return hasAttribute(Attribute::InReg);
267 bool Argument::hasReturnedAttr() const {
268 return hasAttribute(Attribute::Returned);
271 bool Argument::hasZExtAttr() const {
272 return hasAttribute(Attribute::ZExt);
275 bool Argument::hasSExtAttr() const {
276 return hasAttribute(Attribute::SExt);
279 bool Argument::onlyReadsMemory() const {
280 AttributeList Attrs = getParent()->getAttributes();
281 return Attrs.hasParamAttr(getArgNo(), Attribute::ReadOnly) ||
282 Attrs.hasParamAttr(getArgNo(), Attribute::ReadNone);
285 void Argument::addAttrs(AttrBuilder &B) {
286 AttributeList AL = getParent()->getAttributes();
287 AL = AL.addParamAttributes(Parent->getContext(), getArgNo(), B);
288 getParent()->setAttributes(AL);
291 void Argument::addAttr(Attribute::AttrKind Kind) {
292 getParent()->addParamAttr(getArgNo(), Kind);
295 void Argument::addAttr(Attribute Attr) {
296 getParent()->addParamAttr(getArgNo(), Attr);
299 void Argument::removeAttr(Attribute::AttrKind Kind) {
300 getParent()->removeParamAttr(getArgNo(), Kind);
303 void Argument::removeAttrs(const AttrBuilder &B) {
304 AttributeList AL = getParent()->getAttributes();
305 AL = AL.removeParamAttributes(Parent->getContext(), getArgNo(), B);
306 getParent()->setAttributes(AL);
309 bool Argument::hasAttribute(Attribute::AttrKind Kind) const {
310 return getParent()->hasParamAttribute(getArgNo(), Kind);
313 Attribute Argument::getAttribute(Attribute::AttrKind Kind) const {
314 return getParent()->getParamAttribute(getArgNo(), Kind);
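// Example (editorial sketch): the Argument-level helpers above simply forward
// to the parent Function's attribute list, so per-argument attributes can be
// edited without touching AttributeList directly. `A` is an Argument.
//
//   A.addAttr(Attribute::NoAlias);
//   if (A.hasAttribute(Attribute::NoCapture))
//     A.removeAttr(Attribute::NoCapture);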
317 //===----------------------------------------------------------------------===//
318 // Helper Methods in Function
319 //===----------------------------------------------------------------------===//
321 LLVMContext &Function::getContext() const {
322 return getType()->getContext();
325 unsigned Function::getInstructionCount() const {
326 unsigned NumInstrs = 0;
327 for (const BasicBlock &BB : BasicBlocks)
328 NumInstrs += std::distance(BB.instructionsWithoutDebug().begin(),
329 BB.instructionsWithoutDebug().end());
330 return NumInstrs;
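// Example (editorial sketch): getInstructionCount() skips debug intrinsics,
// which makes it handy for rough size reporting. `M` is a Module; errs() comes
// from llvm/Support/raw_ostream.h.
//
//   for (const Function &F : M)
//     if (!F.isDeclaration())
//       errs() << F.getName() << ": " << F.getInstructionCount() << "\n";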
333 Function *Function::Create(FunctionType *Ty, LinkageTypes Linkage,
334 const Twine &N, Module &M) {
335 return Create(Ty, Linkage, M.getDataLayout().getProgramAddressSpace(), N, &M);
338 Function *Function::createWithDefaultAttr(FunctionType *Ty,
339 LinkageTypes Linkage,
340 unsigned AddrSpace, const Twine &N,
341 Module *M) {
342 auto *F = new Function(Ty, Linkage, AddrSpace, N, M);
343 AttrBuilder B;
344 if (M->getUwtable())
345 B.addAttribute(Attribute::UWTable);
346 switch (M->getFramePointer()) {
347 case FramePointerKind::None:
348 // 0 ("none") is the default.
349 break;
350 case FramePointerKind::NonLeaf:
351 B.addAttribute("frame-pointer", "non-leaf");
352 break;
353 case FramePointerKind::All:
354 B.addAttribute("frame-pointer", "all");
355 break;
357 F->addFnAttrs(B);
358 return F;
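// Example (editorial sketch): creating an external "i32 (i32)" function with
// the Create overload above. The module and symbol names are placeholders.
//
//   LLVMContext Ctx;
//   Module M("example", Ctx);
//   FunctionType *FTy = FunctionType::get(Type::getInt32Ty(Ctx),
//                                         {Type::getInt32Ty(Ctx)},
//                                         /*isVarArg=*/false);
//   Function *F = Function::Create(FTy, GlobalValue::ExternalLinkage,
//                                  "my_func", M);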
361 void Function::removeFromParent() {
362 getParent()->getFunctionList().remove(getIterator());
365 void Function::eraseFromParent() {
366 getParent()->getFunctionList().erase(getIterator());
369 //===----------------------------------------------------------------------===//
370 // Function Implementation
371 //===----------------------------------------------------------------------===//
373 static unsigned computeAddrSpace(unsigned AddrSpace, Module *M) {
374 // If AS == -1 and we are passed a valid module pointer we place the function
375 // in the program address space. Otherwise we default to AS0.
376 if (AddrSpace == static_cast<unsigned>(-1))
377 return M ? M->getDataLayout().getProgramAddressSpace() : 0;
378 return AddrSpace;
381 Function::Function(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace,
382 const Twine &name, Module *ParentModule)
383 : GlobalObject(Ty, Value::FunctionVal,
384 OperandTraits<Function>::op_begin(this), 0, Linkage, name,
385 computeAddrSpace(AddrSpace, ParentModule)),
386 NumArgs(Ty->getNumParams()) {
387 assert(FunctionType::isValidReturnType(getReturnType()) &&
388 "invalid return type");
389 setGlobalObjectSubClassData(0);
391 // We only need a symbol table for a function if the context keeps value names
392 if (!getContext().shouldDiscardValueNames())
393 SymTab = std::make_unique<ValueSymbolTable>(NonGlobalValueMaxNameSize);
395 // If the function has arguments, mark them as lazily built.
396 if (Ty->getNumParams())
397 setValueSubclassData(1); // Set the "has lazy arguments" bit.
399 if (ParentModule)
400 ParentModule->getFunctionList().push_back(this);
402 HasLLVMReservedName = getName().startswith("llvm.");
403 // Ensure intrinsics have the right parameter attributes.
404 // Note, the IntID field will have been set in Value::setName if this function
405 // name is a valid intrinsic ID.
406 if (IntID)
407 setAttributes(Intrinsic::getAttributes(getContext(), IntID));
410 Function::~Function() {
411 dropAllReferences(); // After this it is safe to delete instructions.
413 // Delete all of the method arguments and unlink from symbol table...
414 if (Arguments)
415 clearArguments();
417 // Remove the function from the on-the-side GC table.
418 clearGC();
421 void Function::BuildLazyArguments() const {
422 // Create the arguments vector; all arguments start out unnamed.
423 auto *FT = getFunctionType();
424 if (NumArgs > 0) {
425 Arguments = std::allocator<Argument>().allocate(NumArgs);
426 for (unsigned i = 0, e = NumArgs; i != e; ++i) {
427 Type *ArgTy = FT->getParamType(i);
428 assert(!ArgTy->isVoidTy() && "Cannot have void typed arguments!");
429 new (Arguments + i) Argument(ArgTy, "", const_cast<Function *>(this), i);
433 // Clear the lazy arguments bit.
434 unsigned SDC = getSubclassDataFromValue();
435 SDC &= ~(1 << 0);
436 const_cast<Function*>(this)->setValueSubclassData(SDC);
437 assert(!hasLazyArguments());
440 static MutableArrayRef<Argument> makeArgArray(Argument *Args, size_t Count) {
441 return MutableArrayRef<Argument>(Args, Count);
444 bool Function::isConstrainedFPIntrinsic() const {
445 switch (getIntrinsicID()) {
446 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
447 case Intrinsic::INTRINSIC:
448 #include "llvm/IR/ConstrainedOps.def"
449 return true;
450 #undef INSTRUCTION
451 default:
452 return false;
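// Example (editorial sketch): counting how many functions in a module are
// constrained-FP intrinsics. `M` is a Module.
//
//   unsigned NumConstrainedFP = 0;
//   for (const Function &F : M)
//     if (F.isConstrainedFPIntrinsic())
//       ++NumConstrainedFP;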
456 void Function::clearArguments() {
457 for (Argument &A : makeArgArray(Arguments, NumArgs)) {
458 A.setName("");
459 A.~Argument();
461 std::allocator<Argument>().deallocate(Arguments, NumArgs);
462 Arguments = nullptr;
465 void Function::stealArgumentListFrom(Function &Src) {
466 assert(isDeclaration() && "Expected no references to current arguments");
468 // Drop the current arguments, if any, and set the lazy argument bit.
469 if (!hasLazyArguments()) {
470 assert(llvm::all_of(makeArgArray(Arguments, NumArgs),
471 [](const Argument &A) { return A.use_empty(); }) &&
472 "Expected arguments to be unused in declaration");
473 clearArguments();
474 setValueSubclassData(getSubclassDataFromValue() | (1 << 0));
477 // Nothing to steal if Src has lazy arguments.
478 if (Src.hasLazyArguments())
479 return;
481 // Steal arguments from Src, and fix the lazy argument bits.
482 assert(arg_size() == Src.arg_size());
483 Arguments = Src.Arguments;
484 Src.Arguments = nullptr;
485 for (Argument &A : makeArgArray(Arguments, NumArgs)) {
486 // FIXME: This does the work of transferNodesFromList inefficiently.
487 SmallString<128> Name;
488 if (A.hasName())
489 Name = A.getName();
490 if (!Name.empty())
491 A.setName("");
492 A.setParent(this);
493 if (!Name.empty())
494 A.setName(Name);
497 setValueSubclassData(getSubclassDataFromValue() & ~(1 << 0));
498 assert(!hasLazyArguments());
499 Src.setValueSubclassData(Src.getSubclassDataFromValue() | (1 << 0));
502 // dropAllReferences() - This function causes all the subinstructions to "let
503 // go" of all references that they are maintaining. This allows one to
504 // 'delete' a whole class at a time, even though there may be circular
505 // references... first all references are dropped, and all use counts go to
506 // zero. Then everything is deleted for real. Note that no operations are
507 // valid on an object that has "dropped all references", except operator
508 // delete.
510 void Function::dropAllReferences() {
511 setIsMaterializable(false);
513 for (BasicBlock &BB : *this)
514 BB.dropAllReferences();
516 // Delete all basic blocks. They are now unused, except possibly by
517 // blockaddresses, but BasicBlock's destructor takes care of those.
518 while (!BasicBlocks.empty())
519 BasicBlocks.begin()->eraseFromParent();
521 // Drop uses of any optional data (real or placeholder).
522 if (getNumOperands()) {
523 User::dropAllReferences();
524 setNumHungOffUseOperands(0);
525 setValueSubclassData(getSubclassDataFromValue() & ~0xe);
528 // Metadata is stored in a side-table.
529 clearMetadata();
532 void Function::addAttribute(unsigned i, Attribute Attr) {
533 AttributeSets = AttributeSets.addAttribute(getContext(), i, Attr);
536 void Function::addFnAttr(Attribute::AttrKind Kind) {
537 AttributeSets = AttributeSets.addFnAttribute(getContext(), Kind);
540 void Function::addFnAttr(StringRef Kind, StringRef Val) {
541 AttributeSets = AttributeSets.addFnAttribute(getContext(), Kind, Val);
544 void Function::addFnAttr(Attribute Attr) {
545 AttributeSets = AttributeSets.addFnAttribute(getContext(), Attr);
548 void Function::addFnAttrs(const AttrBuilder &Attrs) {
549 AttributeSets = AttributeSets.addFnAttributes(getContext(), Attrs);
552 void Function::addRetAttr(Attribute::AttrKind Kind) {
553 AttributeSets = AttributeSets.addRetAttribute(getContext(), Kind);
556 void Function::addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
557 AttributeSets = AttributeSets.addParamAttribute(getContext(), ArgNo, Kind);
560 void Function::addParamAttr(unsigned ArgNo, Attribute Attr) {
561 AttributeSets = AttributeSets.addParamAttribute(getContext(), ArgNo, Attr);
564 void Function::addParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs) {
565 AttributeSets = AttributeSets.addParamAttributes(getContext(), ArgNo, Attrs);
568 void Function::removeAttribute(unsigned i, Attribute::AttrKind Kind) {
569 AttributeSets = AttributeSets.removeAttribute(getContext(), i, Kind);
572 void Function::removeAttribute(unsigned i, StringRef Kind) {
573 AttributeSets = AttributeSets.removeAttribute(getContext(), i, Kind);
576 void Function::removeFnAttr(Attribute::AttrKind Kind) {
577 AttributeSets = AttributeSets.removeFnAttribute(getContext(), Kind);
580 void Function::removeFnAttr(StringRef Kind) {
581 AttributeSets = AttributeSets.removeFnAttribute(getContext(), Kind);
584 void Function::removeFnAttrs(const AttrBuilder &Attrs) {
585 AttributeSets = AttributeSets.removeFnAttributes(getContext(), Attrs);
588 void Function::removeRetAttr(Attribute::AttrKind Kind) {
589 AttributeSets = AttributeSets.removeRetAttribute(getContext(), Kind);
592 void Function::removeRetAttr(StringRef Kind) {
593 AttributeSets = AttributeSets.removeRetAttribute(getContext(), Kind);
596 void Function::removeRetAttrs(const AttrBuilder &Attrs) {
597 AttributeSets = AttributeSets.removeRetAttributes(getContext(), Attrs);
600 void Function::removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
601 AttributeSets = AttributeSets.removeParamAttribute(getContext(), ArgNo, Kind);
604 void Function::removeParamAttr(unsigned ArgNo, StringRef Kind) {
605 AttributeSets = AttributeSets.removeParamAttribute(getContext(), ArgNo, Kind);
608 void Function::removeParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs) {
609 AttributeSets =
610 AttributeSets.removeParamAttributes(getContext(), ArgNo, Attrs);
613 void Function::addDereferenceableParamAttr(unsigned ArgNo, uint64_t Bytes) {
614 AttributeSets =
615 AttributeSets.addDereferenceableParamAttr(getContext(), ArgNo, Bytes);
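// Example (editorial sketch): the add*Attr helpers above rebuild the
// function's AttributeList in place. `F` is assumed to be a Function whose
// first parameter is a pointer; the attribute choices are arbitrary.
//
//   F.addFnAttr(Attribute::NoUnwind);
//   F.addFnAttr("target-cpu", "generic");
//   F.addRetAttr(Attribute::NonNull);
//   F.addParamAttr(0, Attribute::ReadOnly);
//   F.addDereferenceableParamAttr(0, 16);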
618 bool Function::hasFnAttribute(Attribute::AttrKind Kind) const {
619 return AttributeSets.hasFnAttr(Kind);
622 bool Function::hasFnAttribute(StringRef Kind) const {
623 return AttributeSets.hasFnAttr(Kind);
626 bool Function::hasRetAttribute(Attribute::AttrKind Kind) const {
627 return AttributeSets.hasRetAttr(Kind);
630 bool Function::hasParamAttribute(unsigned ArgNo,
631 Attribute::AttrKind Kind) const {
632 return AttributeSets.hasParamAttr(ArgNo, Kind);
635 Attribute Function::getAttribute(unsigned i, Attribute::AttrKind Kind) const {
636 return AttributeSets.getAttribute(i, Kind);
639 Attribute Function::getAttribute(unsigned i, StringRef Kind) const {
640 return AttributeSets.getAttribute(i, Kind);
643 Attribute Function::getFnAttribute(Attribute::AttrKind Kind) const {
644 return AttributeSets.getFnAttr(Kind);
647 Attribute Function::getFnAttribute(StringRef Kind) const {
648 return AttributeSets.getFnAttr(Kind);
651 /// gets the specified attribute from the list of attributes.
652 Attribute Function::getParamAttribute(unsigned ArgNo,
653 Attribute::AttrKind Kind) const {
654 return AttributeSets.getParamAttr(ArgNo, Kind);
657 void Function::addDereferenceableOrNullParamAttr(unsigned ArgNo,
658 uint64_t Bytes) {
659 AttributeSets = AttributeSets.addDereferenceableOrNullParamAttr(getContext(),
660 ArgNo, Bytes);
663 DenormalMode Function::getDenormalMode(const fltSemantics &FPType) const {
664 if (&FPType == &APFloat::IEEEsingle()) {
665 Attribute Attr = getFnAttribute("denormal-fp-math-f32");
666 StringRef Val = Attr.getValueAsString();
667 if (!Val.empty())
668 return parseDenormalFPAttribute(Val);
670 // If the f32 variant of the attribute isn't specified, try to use the
671 // generic one.
674 Attribute Attr = getFnAttribute("denormal-fp-math");
675 return parseDenormalFPAttribute(Attr.getValueAsString());
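// Example (editorial sketch): querying the denormal mode that applies to f32
// arithmetic in `F`, falling back to "denormal-fp-math" as implemented above.
//
//   DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
//   bool IsIEEE = (Mode == DenormalMode::getIEEE());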
678 const std::string &Function::getGC() const {
679 assert(hasGC() && "Function has no collector");
680 return getContext().getGC(*this);
683 void Function::setGC(std::string Str) {
684 setValueSubclassDataBit(14, !Str.empty());
685 getContext().setGC(*this, std::move(Str));
688 void Function::clearGC() {
689 if (!hasGC())
690 return;
691 getContext().deleteGC(*this);
692 setValueSubclassDataBit(14, false);
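// Example (editorial sketch): attaching and reading a GC strategy name on `F`;
// "statepoint-example" is one of the strategies shipped with LLVM.
//
//   F.setGC("statepoint-example");
//   if (F.hasGC())
//     StringRef Strategy = F.getGC();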
695 bool Function::hasStackProtectorFnAttr() const {
696 return hasFnAttribute(Attribute::StackProtect) ||
697 hasFnAttribute(Attribute::StackProtectStrong) ||
698 hasFnAttribute(Attribute::StackProtectReq);
701 /// Copy all additional attributes (those not needed to create a Function) from
702 /// the Function Src to this one.
703 void Function::copyAttributesFrom(const Function *Src) {
704 GlobalObject::copyAttributesFrom(Src);
705 setCallingConv(Src->getCallingConv());
706 setAttributes(Src->getAttributes());
707 if (Src->hasGC())
708 setGC(Src->getGC());
709 else
710 clearGC();
711 if (Src->hasPersonalityFn())
712 setPersonalityFn(Src->getPersonalityFn());
713 if (Src->hasPrefixData())
714 setPrefixData(Src->getPrefixData());
715 if (Src->hasPrologueData())
716 setPrologueData(Src->getPrologueData());
719 /// Table of string intrinsic names indexed by enum value.
720 static const char * const IntrinsicNameTable[] = {
721 "not_intrinsic",
722 #define GET_INTRINSIC_NAME_TABLE
723 #include "llvm/IR/IntrinsicImpl.inc"
724 #undef GET_INTRINSIC_NAME_TABLE
727 /// Table of per-target intrinsic name tables.
728 #define GET_INTRINSIC_TARGET_DATA
729 #include "llvm/IR/IntrinsicImpl.inc"
730 #undef GET_INTRINSIC_TARGET_DATA
732 bool Function::isTargetIntrinsic(Intrinsic::ID IID) {
733 return IID > TargetInfos[0].Count;
736 bool Function::isTargetIntrinsic() const {
737 return isTargetIntrinsic(IntID);
740 /// Find the segment of \c IntrinsicNameTable for intrinsics with the same
741 /// target as \c Name, or the generic table if \c Name is not target specific.
743 /// Returns the relevant slice of \c IntrinsicNameTable
744 static ArrayRef<const char *> findTargetSubtable(StringRef Name) {
745 assert(Name.startswith("llvm."));
747 ArrayRef<IntrinsicTargetInfo> Targets(TargetInfos);
748 // Drop "llvm." and take the first dotted component. That will be the target
749 // if this is target specific.
750 StringRef Target = Name.drop_front(5).split('.').first;
751 auto It = partition_point(
752 Targets, [=](const IntrinsicTargetInfo &TI) { return TI.Name < Target; });
753 // Either we found the target, or we fall back to the generic set, which is
754 // always first.
755 const auto &TI = It != Targets.end() && It->Name == Target ? *It : Targets[0];
756 return makeArrayRef(&IntrinsicNameTable[1] + TI.Offset, TI.Count);
759 /// This does the actual lookup of an intrinsic ID which
760 /// matches the given function name.
761 Intrinsic::ID Function::lookupIntrinsicID(StringRef Name) {
762 ArrayRef<const char *> NameTable = findTargetSubtable(Name);
763 int Idx = Intrinsic::lookupLLVMIntrinsicByName(NameTable, Name);
764 if (Idx == -1)
765 return Intrinsic::not_intrinsic;
767 // Intrinsic IDs correspond to the location in IntrinsicNameTable, but we have
768 // an index into a sub-table.
769 int Adjust = NameTable.data() - IntrinsicNameTable;
770 Intrinsic::ID ID = static_cast<Intrinsic::ID>(Idx + Adjust);
772 // If the intrinsic is not overloaded, require an exact match. If it is
773 // overloaded, require either exact or prefix match.
774 const auto MatchSize = strlen(NameTable[Idx]);
775 assert(Name.size() >= MatchSize && "Expected either exact or prefix match");
776 bool IsExactMatch = Name.size() == MatchSize;
777 return IsExactMatch || Intrinsic::isOverloaded(ID) ? ID
778 : Intrinsic::not_intrinsic;
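// Example (editorial sketch): mapping a mangled intrinsic name back to its ID.
// For an overloaded intrinsic, a prefix match suffices, as implemented above.
//
//   Intrinsic::ID ID =
//       Function::lookupIntrinsicID("llvm.memcpy.p0i8.p0i8.i64");
//   bool IsMemCpy = (ID == Intrinsic::memcpy);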
781 void Function::recalculateIntrinsicID() {
782 StringRef Name = getName();
783 if (!Name.startswith("llvm.")) {
784 HasLLVMReservedName = false;
785 IntID = Intrinsic::not_intrinsic;
786 return;
788 HasLLVMReservedName = true;
789 IntID = lookupIntrinsicID(Name);
792 /// Returns a stable mangling for the type specified for use in the name
793 /// mangling scheme used by 'any' types in intrinsic signatures. The mangling
794 /// of named types is simply their name. Manglings for unnamed types consist
795 /// of a prefix ('p' for pointers, 'a' for arrays, 'f_' for functions)
796 /// combined with the mangling of their component types. A vararg function
797 /// type will have a suffix of 'vararg'. Since function types can contain
798 /// other function types, we close a function type mangling with suffix 'f'
799 /// which can't be confused with its prefix. This ensures we don't have
800 /// collisions between two unrelated function types. Otherwise, you might
801 /// parse ffXX as f(fXX) or f(fX)X. (X is a placeholder for any other type.)
802 /// The HasUnnamedType boolean is set if an unnamed type was encountered,
803 /// indicating that extra care must be taken to ensure a unique name.
804 static std::string getMangledTypeStr(Type *Ty, bool &HasUnnamedType) {
805 std::string Result;
806 if (PointerType *PTyp = dyn_cast<PointerType>(Ty)) {
807 Result += "p" + utostr(PTyp->getAddressSpace());
808 // Opaque pointers don't carry pointee type information, so we only mangle
809 // the address space for them.
810 if (!PTyp->isOpaque())
811 Result += getMangledTypeStr(PTyp->getElementType(), HasUnnamedType);
812 } else if (ArrayType *ATyp = dyn_cast<ArrayType>(Ty)) {
813 Result += "a" + utostr(ATyp->getNumElements()) +
814 getMangledTypeStr(ATyp->getElementType(), HasUnnamedType);
815 } else if (StructType *STyp = dyn_cast<StructType>(Ty)) {
816 if (!STyp->isLiteral()) {
817 Result += "s_";
818 if (STyp->hasName())
819 Result += STyp->getName();
820 else
821 HasUnnamedType = true;
822 } else {
823 Result += "sl_";
824 for (auto Elem : STyp->elements())
825 Result += getMangledTypeStr(Elem, HasUnnamedType);
827 // Ensure nested structs are distinguishable.
828 Result += "s";
829 } else if (FunctionType *FT = dyn_cast<FunctionType>(Ty)) {
830 Result += "f_" + getMangledTypeStr(FT->getReturnType(), HasUnnamedType);
831 for (size_t i = 0; i < FT->getNumParams(); i++)
832 Result += getMangledTypeStr(FT->getParamType(i), HasUnnamedType);
833 if (FT->isVarArg())
834 Result += "vararg";
835 // Ensure nested function types are distinguishable.
836 Result += "f";
837 } else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
838 ElementCount EC = VTy->getElementCount();
839 if (EC.isScalable())
840 Result += "nx";
841 Result += "v" + utostr(EC.getKnownMinValue()) +
842 getMangledTypeStr(VTy->getElementType(), HasUnnamedType);
843 } else if (Ty) {
844 switch (Ty->getTypeID()) {
845 default: llvm_unreachable("Unhandled type");
846 case Type::VoidTyID: Result += "isVoid"; break;
847 case Type::MetadataTyID: Result += "Metadata"; break;
848 case Type::HalfTyID: Result += "f16"; break;
849 case Type::BFloatTyID: Result += "bf16"; break;
850 case Type::FloatTyID: Result += "f32"; break;
851 case Type::DoubleTyID: Result += "f64"; break;
852 case Type::X86_FP80TyID: Result += "f80"; break;
853 case Type::FP128TyID: Result += "f128"; break;
854 case Type::PPC_FP128TyID: Result += "ppcf128"; break;
855 case Type::X86_MMXTyID: Result += "x86mmx"; break;
856 case Type::X86_AMXTyID: Result += "x86amx"; break;
857 case Type::IntegerTyID:
858 Result += "i" + utostr(cast<IntegerType>(Ty)->getBitWidth());
859 break;
862 return Result;
865 StringRef Intrinsic::getBaseName(ID id) {
866 assert(id < num_intrinsics && "Invalid intrinsic ID!");
867 return IntrinsicNameTable[id];
870 StringRef Intrinsic::getName(ID id) {
871 assert(id < num_intrinsics && "Invalid intrinsic ID!");
872 assert(!Intrinsic::isOverloaded(id) &&
873 "This version of getName does not support overloading");
874 return getBaseName(id);
877 static std::string getIntrinsicNameImpl(Intrinsic::ID Id, ArrayRef<Type *> Tys,
878 Module *M, FunctionType *FT,
879 bool EarlyModuleCheck) {
881 assert(Id < Intrinsic::num_intrinsics && "Invalid intrinsic ID!");
882 assert((Tys.empty() || Intrinsic::isOverloaded(Id)) &&
883 "This version of getName is for overloaded intrinsics only");
884 (void)EarlyModuleCheck;
885 assert((!EarlyModuleCheck || M ||
886 !any_of(Tys, [](Type *T) { return isa<PointerType>(T); })) &&
887 "Intrinsic overloading on pointer types need to provide a Module");
888 bool HasUnnamedType = false;
889 std::string Result(Intrinsic::getBaseName(Id));
890 for (Type *Ty : Tys)
891 Result += "." + getMangledTypeStr(Ty, HasUnnamedType);
892 if (HasUnnamedType) {
893 assert(M && "unnamed types need a module");
894 if (!FT)
895 FT = Intrinsic::getType(M->getContext(), Id, Tys);
896 else
897 assert((FT == Intrinsic::getType(M->getContext(), Id, Tys)) &&
898 "Provided FunctionType must match arguments");
899 return M->getUniqueIntrinsicName(Result, Id, FT);
901 return Result;
904 std::string Intrinsic::getName(ID Id, ArrayRef<Type *> Tys, Module *M,
905 FunctionType *FT) {
906 assert(M && "We need to have a Module");
907 return getIntrinsicNameImpl(Id, Tys, M, FT, true);
910 std::string Intrinsic::getNameNoUnnamedTypes(ID Id, ArrayRef<Type *> Tys) {
911 return getIntrinsicNameImpl(Id, Tys, nullptr, nullptr, false);
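// Example (editorial sketch): the type mangling above is what produces the
// suffixes of overloaded intrinsic names, e.g. the i64 overload of llvm.umax.
// `Ctx` is an LLVMContext.
//
//   std::string Name =
//       Intrinsic::getNameNoUnnamedTypes(Intrinsic::umax,
//                                        {Type::getInt64Ty(Ctx)});
//   // Name == "llvm.umax.i64"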
914 /// IIT_Info - These are enumerators that describe the entries returned by the
915 /// getIntrinsicInfoTableEntries function.
917 /// NOTE: This must be kept in synch with the copy in TblGen/IntrinsicEmitter!
918 enum IIT_Info {
919 // Common values should be encoded with 0-15.
920 IIT_Done = 0,
921 IIT_I1 = 1,
922 IIT_I8 = 2,
923 IIT_I16 = 3,
924 IIT_I32 = 4,
925 IIT_I64 = 5,
926 IIT_F16 = 6,
927 IIT_F32 = 7,
928 IIT_F64 = 8,
929 IIT_V2 = 9,
930 IIT_V4 = 10,
931 IIT_V8 = 11,
932 IIT_V16 = 12,
933 IIT_V32 = 13,
934 IIT_PTR = 14,
935 IIT_ARG = 15,
937 // Values from 16+ are only encodable with the inefficient encoding.
938 IIT_V64 = 16,
939 IIT_MMX = 17,
940 IIT_TOKEN = 18,
941 IIT_METADATA = 19,
942 IIT_EMPTYSTRUCT = 20,
943 IIT_STRUCT2 = 21,
944 IIT_STRUCT3 = 22,
945 IIT_STRUCT4 = 23,
946 IIT_STRUCT5 = 24,
947 IIT_EXTEND_ARG = 25,
948 IIT_TRUNC_ARG = 26,
949 IIT_ANYPTR = 27,
950 IIT_V1 = 28,
951 IIT_VARARG = 29,
952 IIT_HALF_VEC_ARG = 30,
953 IIT_SAME_VEC_WIDTH_ARG = 31,
954 IIT_PTR_TO_ARG = 32,
955 IIT_PTR_TO_ELT = 33,
956 IIT_VEC_OF_ANYPTRS_TO_ELT = 34,
957 IIT_I128 = 35,
958 IIT_V512 = 36,
959 IIT_V1024 = 37,
960 IIT_STRUCT6 = 38,
961 IIT_STRUCT7 = 39,
962 IIT_STRUCT8 = 40,
963 IIT_F128 = 41,
964 IIT_VEC_ELEMENT = 42,
965 IIT_SCALABLE_VEC = 43,
966 IIT_SUBDIVIDE2_ARG = 44,
967 IIT_SUBDIVIDE4_ARG = 45,
968 IIT_VEC_OF_BITCASTS_TO_INT = 46,
969 IIT_V128 = 47,
970 IIT_BF16 = 48,
971 IIT_STRUCT9 = 49,
972 IIT_V256 = 50,
973 IIT_AMX = 51
976 static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
977 IIT_Info LastInfo,
978 SmallVectorImpl<Intrinsic::IITDescriptor> &OutputTable) {
979 using namespace Intrinsic;
981 bool IsScalableVector = (LastInfo == IIT_SCALABLE_VEC);
983 IIT_Info Info = IIT_Info(Infos[NextElt++]);
984 unsigned StructElts = 2;
986 switch (Info) {
987 case IIT_Done:
988 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Void, 0));
989 return;
990 case IIT_VARARG:
991 OutputTable.push_back(IITDescriptor::get(IITDescriptor::VarArg, 0));
992 return;
993 case IIT_MMX:
994 OutputTable.push_back(IITDescriptor::get(IITDescriptor::MMX, 0));
995 return;
996 case IIT_AMX:
997 OutputTable.push_back(IITDescriptor::get(IITDescriptor::AMX, 0));
998 return;
999 case IIT_TOKEN:
1000 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Token, 0));
1001 return;
1002 case IIT_METADATA:
1003 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Metadata, 0));
1004 return;
1005 case IIT_F16:
1006 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Half, 0));
1007 return;
1008 case IIT_BF16:
1009 OutputTable.push_back(IITDescriptor::get(IITDescriptor::BFloat, 0));
1010 return;
1011 case IIT_F32:
1012 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Float, 0));
1013 return;
1014 case IIT_F64:
1015 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Double, 0));
1016 return;
1017 case IIT_F128:
1018 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Quad, 0));
1019 return;
1020 case IIT_I1:
1021 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 1));
1022 return;
1023 case IIT_I8:
1024 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8));
1025 return;
1026 case IIT_I16:
1027 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer,16));
1028 return;
1029 case IIT_I32:
1030 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 32));
1031 return;
1032 case IIT_I64:
1033 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 64));
1034 return;
1035 case IIT_I128:
1036 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 128));
1037 return;
1038 case IIT_V1:
1039 OutputTable.push_back(IITDescriptor::getVector(1, IsScalableVector));
1040 DecodeIITType(NextElt, Infos, Info, OutputTable);
1041 return;
1042 case IIT_V2:
1043 OutputTable.push_back(IITDescriptor::getVector(2, IsScalableVector));
1044 DecodeIITType(NextElt, Infos, Info, OutputTable);
1045 return;
1046 case IIT_V4:
1047 OutputTable.push_back(IITDescriptor::getVector(4, IsScalableVector));
1048 DecodeIITType(NextElt, Infos, Info, OutputTable);
1049 return;
1050 case IIT_V8:
1051 OutputTable.push_back(IITDescriptor::getVector(8, IsScalableVector));
1052 DecodeIITType(NextElt, Infos, Info, OutputTable);
1053 return;
1054 case IIT_V16:
1055 OutputTable.push_back(IITDescriptor::getVector(16, IsScalableVector));
1056 DecodeIITType(NextElt, Infos, Info, OutputTable);
1057 return;
1058 case IIT_V32:
1059 OutputTable.push_back(IITDescriptor::getVector(32, IsScalableVector));
1060 DecodeIITType(NextElt, Infos, Info, OutputTable);
1061 return;
1062 case IIT_V64:
1063 OutputTable.push_back(IITDescriptor::getVector(64, IsScalableVector));
1064 DecodeIITType(NextElt, Infos, Info, OutputTable);
1065 return;
1066 case IIT_V128:
1067 OutputTable.push_back(IITDescriptor::getVector(128, IsScalableVector));
1068 DecodeIITType(NextElt, Infos, Info, OutputTable);
1069 return;
1070 case IIT_V256:
1071 OutputTable.push_back(IITDescriptor::getVector(256, IsScalableVector));
1072 DecodeIITType(NextElt, Infos, Info, OutputTable);
1073 return;
1074 case IIT_V512:
1075 OutputTable.push_back(IITDescriptor::getVector(512, IsScalableVector));
1076 DecodeIITType(NextElt, Infos, Info, OutputTable);
1077 return;
1078 case IIT_V1024:
1079 OutputTable.push_back(IITDescriptor::getVector(1024, IsScalableVector));
1080 DecodeIITType(NextElt, Infos, Info, OutputTable);
1081 return;
1082 case IIT_PTR:
1083 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 0));
1084 DecodeIITType(NextElt, Infos, Info, OutputTable);
1085 return;
1086 case IIT_ANYPTR: { // [ANYPTR addrspace, subtype]
1087 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer,
1088 Infos[NextElt++]));
1089 DecodeIITType(NextElt, Infos, Info, OutputTable);
1090 return;
1092 case IIT_ARG: {
1093 unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1094 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Argument, ArgInfo));
1095 return;
1097 case IIT_EXTEND_ARG: {
1098 unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1099 OutputTable.push_back(IITDescriptor::get(IITDescriptor::ExtendArgument,
1100 ArgInfo));
1101 return;
1103 case IIT_TRUNC_ARG: {
1104 unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1105 OutputTable.push_back(IITDescriptor::get(IITDescriptor::TruncArgument,
1106 ArgInfo));
1107 return;
1109 case IIT_HALF_VEC_ARG: {
1110 unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1111 OutputTable.push_back(IITDescriptor::get(IITDescriptor::HalfVecArgument,
1112 ArgInfo));
1113 return;
1115 case IIT_SAME_VEC_WIDTH_ARG: {
1116 unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1117 OutputTable.push_back(IITDescriptor::get(IITDescriptor::SameVecWidthArgument,
1118 ArgInfo));
1119 return;
1121 case IIT_PTR_TO_ARG: {
1122 unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1123 OutputTable.push_back(IITDescriptor::get(IITDescriptor::PtrToArgument,
1124 ArgInfo));
1125 return;
1127 case IIT_PTR_TO_ELT: {
1128 unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1129 OutputTable.push_back(IITDescriptor::get(IITDescriptor::PtrToElt, ArgInfo));
1130 return;
1132 case IIT_VEC_OF_ANYPTRS_TO_ELT: {
1133 unsigned short ArgNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1134 unsigned short RefNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1135 OutputTable.push_back(
1136 IITDescriptor::get(IITDescriptor::VecOfAnyPtrsToElt, ArgNo, RefNo));
1137 return;
1139 case IIT_EMPTYSTRUCT:
1140 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
1141 return;
1142 case IIT_STRUCT9: ++StructElts; LLVM_FALLTHROUGH;
1143 case IIT_STRUCT8: ++StructElts; LLVM_FALLTHROUGH;
1144 case IIT_STRUCT7: ++StructElts; LLVM_FALLTHROUGH;
1145 case IIT_STRUCT6: ++StructElts; LLVM_FALLTHROUGH;
1146 case IIT_STRUCT5: ++StructElts; LLVM_FALLTHROUGH;
1147 case IIT_STRUCT4: ++StructElts; LLVM_FALLTHROUGH;
1148 case IIT_STRUCT3: ++StructElts; LLVM_FALLTHROUGH;
1149 case IIT_STRUCT2: {
1150 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct,StructElts));
1152 for (unsigned i = 0; i != StructElts; ++i)
1153 DecodeIITType(NextElt, Infos, Info, OutputTable);
1154 return;
1156 case IIT_SUBDIVIDE2_ARG: {
1157 unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1158 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Subdivide2Argument,
1159 ArgInfo));
1160 return;
1162 case IIT_SUBDIVIDE4_ARG: {
1163 unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1164 OutputTable.push_back(IITDescriptor::get(IITDescriptor::Subdivide4Argument,
1165 ArgInfo));
1166 return;
1168 case IIT_VEC_ELEMENT: {
1169 unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1170 OutputTable.push_back(IITDescriptor::get(IITDescriptor::VecElementArgument,
1171 ArgInfo));
1172 return;
1174 case IIT_SCALABLE_VEC: {
1175 DecodeIITType(NextElt, Infos, Info, OutputTable);
1176 return;
1178 case IIT_VEC_OF_BITCASTS_TO_INT: {
1179 unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
1180 OutputTable.push_back(IITDescriptor::get(IITDescriptor::VecOfBitcastsToInt,
1181 ArgInfo));
1182 return;
1185 llvm_unreachable("unhandled");
1188 #define GET_INTRINSIC_GENERATOR_GLOBAL
1189 #include "llvm/IR/IntrinsicImpl.inc"
1190 #undef GET_INTRINSIC_GENERATOR_GLOBAL
1192 void Intrinsic::getIntrinsicInfoTableEntries(ID id,
1193 SmallVectorImpl<IITDescriptor> &T){
1194 // Check to see if the intrinsic's type was expressible by the table.
1195 unsigned TableVal = IIT_Table[id-1];
1197 // Decode the TableVal into an array of IITValues.
1198 SmallVector<unsigned char, 8> IITValues;
1199 ArrayRef<unsigned char> IITEntries;
1200 unsigned NextElt = 0;
1201 if ((TableVal >> 31) != 0) {
1202 // This is an offset into the IIT_LongEncodingTable.
1203 IITEntries = IIT_LongEncodingTable;
1205 // Strip sentinel bit.
1206 NextElt = (TableVal << 1) >> 1;
1207 } else {
1208 // Decode the TableVal into an array of IITValues. If the entry was encoded
1209 // into a single word in the table itself, decode it now.
1210 do {
1211 IITValues.push_back(TableVal & 0xF);
1212 TableVal >>= 4;
1213 } while (TableVal);
1215 IITEntries = IITValues;
1216 NextElt = 0;
1219 // Okay, decode the table into the output vector of IITDescriptors.
1220 DecodeIITType(NextElt, IITEntries, IIT_Done, T);
1221 while (NextElt != IITEntries.size() && IITEntries[NextElt] != 0)
1222 DecodeIITType(NextElt, IITEntries, IIT_Done, T);
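// Example (editorial sketch): fetching the encoded type descriptors for an
// intrinsic; the resulting table is what drives getType() and the signature
// matching below.
//
//   SmallVector<Intrinsic::IITDescriptor, 8> Table;
//   Intrinsic::getIntrinsicInfoTableEntries(Intrinsic::ctlz, Table);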
1225 static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
1226 ArrayRef<Type*> Tys, LLVMContext &Context) {
1227 using namespace Intrinsic;
1229 IITDescriptor D = Infos.front();
1230 Infos = Infos.slice(1);
1232 switch (D.Kind) {
1233 case IITDescriptor::Void: return Type::getVoidTy(Context);
1234 case IITDescriptor::VarArg: return Type::getVoidTy(Context);
1235 case IITDescriptor::MMX: return Type::getX86_MMXTy(Context);
1236 case IITDescriptor::AMX: return Type::getX86_AMXTy(Context);
1237 case IITDescriptor::Token: return Type::getTokenTy(Context);
1238 case IITDescriptor::Metadata: return Type::getMetadataTy(Context);
1239 case IITDescriptor::Half: return Type::getHalfTy(Context);
1240 case IITDescriptor::BFloat: return Type::getBFloatTy(Context);
1241 case IITDescriptor::Float: return Type::getFloatTy(Context);
1242 case IITDescriptor::Double: return Type::getDoubleTy(Context);
1243 case IITDescriptor::Quad: return Type::getFP128Ty(Context);
1245 case IITDescriptor::Integer:
1246 return IntegerType::get(Context, D.Integer_Width);
1247 case IITDescriptor::Vector:
1248 return VectorType::get(DecodeFixedType(Infos, Tys, Context),
1249 D.Vector_Width);
1250 case IITDescriptor::Pointer:
1251 return PointerType::get(DecodeFixedType(Infos, Tys, Context),
1252 D.Pointer_AddressSpace);
1253 case IITDescriptor::Struct: {
1254 SmallVector<Type *, 8> Elts;
1255 for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
1256 Elts.push_back(DecodeFixedType(Infos, Tys, Context));
1257 return StructType::get(Context, Elts);
1259 case IITDescriptor::Argument:
1260 return Tys[D.getArgumentNumber()];
1261 case IITDescriptor::ExtendArgument: {
1262 Type *Ty = Tys[D.getArgumentNumber()];
1263 if (VectorType *VTy = dyn_cast<VectorType>(Ty))
1264 return VectorType::getExtendedElementVectorType(VTy);
1266 return IntegerType::get(Context, 2 * cast<IntegerType>(Ty)->getBitWidth());
1268 case IITDescriptor::TruncArgument: {
1269 Type *Ty = Tys[D.getArgumentNumber()];
1270 if (VectorType *VTy = dyn_cast<VectorType>(Ty))
1271 return VectorType::getTruncatedElementVectorType(VTy);
1273 IntegerType *ITy = cast<IntegerType>(Ty);
1274 assert(ITy->getBitWidth() % 2 == 0);
1275 return IntegerType::get(Context, ITy->getBitWidth() / 2);
1277 case IITDescriptor::Subdivide2Argument:
1278 case IITDescriptor::Subdivide4Argument: {
1279 Type *Ty = Tys[D.getArgumentNumber()];
1280 VectorType *VTy = dyn_cast<VectorType>(Ty);
1281 assert(VTy && "Expected an argument of Vector Type");
1282 int SubDivs = D.Kind == IITDescriptor::Subdivide2Argument ? 1 : 2;
1283 return VectorType::getSubdividedVectorType(VTy, SubDivs);
1285 case IITDescriptor::HalfVecArgument:
1286 return VectorType::getHalfElementsVectorType(cast<VectorType>(
1287 Tys[D.getArgumentNumber()]));
1288 case IITDescriptor::SameVecWidthArgument: {
1289 Type *EltTy = DecodeFixedType(Infos, Tys, Context);
1290 Type *Ty = Tys[D.getArgumentNumber()];
1291 if (auto *VTy = dyn_cast<VectorType>(Ty))
1292 return VectorType::get(EltTy, VTy->getElementCount());
1293 return EltTy;
1295 case IITDescriptor::PtrToArgument: {
1296 Type *Ty = Tys[D.getArgumentNumber()];
1297 return PointerType::getUnqual(Ty);
1299 case IITDescriptor::PtrToElt: {
1300 Type *Ty = Tys[D.getArgumentNumber()];
1301 VectorType *VTy = dyn_cast<VectorType>(Ty);
1302 if (!VTy)
1303 llvm_unreachable("Expected an argument of Vector Type");
1304 Type *EltTy = VTy->getElementType();
1305 return PointerType::getUnqual(EltTy);
1307 case IITDescriptor::VecElementArgument: {
1308 Type *Ty = Tys[D.getArgumentNumber()];
1309 if (VectorType *VTy = dyn_cast<VectorType>(Ty))
1310 return VTy->getElementType();
1311 llvm_unreachable("Expected an argument of Vector Type");
1313 case IITDescriptor::VecOfBitcastsToInt: {
1314 Type *Ty = Tys[D.getArgumentNumber()];
1315 VectorType *VTy = dyn_cast<VectorType>(Ty);
1316 assert(VTy && "Expected an argument of Vector Type");
1317 return VectorType::getInteger(VTy);
1319 case IITDescriptor::VecOfAnyPtrsToElt:
1320 // Return the overloaded type (which determines the pointer's address space).
1321 return Tys[D.getOverloadArgNumber()];
1323 llvm_unreachable("unhandled");
1326 FunctionType *Intrinsic::getType(LLVMContext &Context,
1327 ID id, ArrayRef<Type*> Tys) {
1328 SmallVector<IITDescriptor, 8> Table;
1329 getIntrinsicInfoTableEntries(id, Table);
1331 ArrayRef<IITDescriptor> TableRef = Table;
1332 Type *ResultTy = DecodeFixedType(TableRef, Tys, Context);
1334 SmallVector<Type*, 8> ArgTys;
1335 while (!TableRef.empty())
1336 ArgTys.push_back(DecodeFixedType(TableRef, Tys, Context));
1338 // DecodeFixedType returns Void for IITDescriptor::Void and IITDescriptor::VarArg.
1339 // If we see a void type as the type of the last argument, it is a vararg intrinsic.
1340 if (!ArgTys.empty() && ArgTys.back()->isVoidTy()) {
1341 ArgTys.pop_back();
1342 return FunctionType::get(ResultTy, ArgTys, true);
1344 return FunctionType::get(ResultTy, ArgTys, false);
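// Example (editorial sketch): materializing the concrete FunctionType of an
// intrinsic overload. `Ctx` is an LLVMContext.
//
//   FunctionType *FTy =
//       Intrinsic::getType(Ctx, Intrinsic::ctlz, {Type::getInt32Ty(Ctx)});
//   // FTy is "i32 (i32, i1)".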
1347 bool Intrinsic::isOverloaded(ID id) {
1348 #define GET_INTRINSIC_OVERLOAD_TABLE
1349 #include "llvm/IR/IntrinsicImpl.inc"
1350 #undef GET_INTRINSIC_OVERLOAD_TABLE
1353 bool Intrinsic::isLeaf(ID id) {
1354 switch (id) {
1355 default:
1356 return true;
1358 case Intrinsic::experimental_gc_statepoint:
1359 case Intrinsic::experimental_patchpoint_void:
1360 case Intrinsic::experimental_patchpoint_i64:
1361 return false;
1365 /// This defines the "Intrinsic::getAttributes(ID id)" method.
1366 #define GET_INTRINSIC_ATTRIBUTES
1367 #include "llvm/IR/IntrinsicImpl.inc"
1368 #undef GET_INTRINSIC_ATTRIBUTES
1370 Function *Intrinsic::getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys) {
1371 // There can never be multiple globals with the same name but different types,
1372 // because intrinsics must be of a specific type.
1373 auto *FT = getType(M->getContext(), id, Tys);
1374 return cast<Function>(
1375 M->getOrInsertFunction(Tys.empty() ? getName(id)
1376 : getName(id, Tys, M, FT),
1377 getType(M->getContext(), id, Tys))
1378 .getCallee());
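// Example (editorial sketch): getting declarations for a non-overloaded and an
// overloaded intrinsic. `M` is a Module and `Ctx` its LLVMContext.
//
//   Function *Trap = Intrinsic::getDeclaration(&M, Intrinsic::trap);
//   Function *Ctlz = Intrinsic::getDeclaration(&M, Intrinsic::ctlz,
//                                              {Type::getInt32Ty(Ctx)});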
1381 // This defines the "Intrinsic::getIntrinsicForGCCBuiltin()" method.
1382 #define GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
1383 #include "llvm/IR/IntrinsicImpl.inc"
1384 #undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
1386 // This defines the "Intrinsic::getIntrinsicForMSBuiltin()" method.
1387 #define GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
1388 #include "llvm/IR/IntrinsicImpl.inc"
1389 #undef GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
1391 using DeferredIntrinsicMatchPair =
1392 std::pair<Type *, ArrayRef<Intrinsic::IITDescriptor>>;
1394 static bool matchIntrinsicType(
1395 Type *Ty, ArrayRef<Intrinsic::IITDescriptor> &Infos,
1396 SmallVectorImpl<Type *> &ArgTys,
1397 SmallVectorImpl<DeferredIntrinsicMatchPair> &DeferredChecks,
1398 bool IsDeferredCheck) {
1399 using namespace Intrinsic;
1401 // If we ran out of descriptors, there are too many arguments.
1402 if (Infos.empty()) return true;
1404 // Do this before slicing off the 'front' part
1405 auto InfosRef = Infos;
1406 auto DeferCheck = [&DeferredChecks, &InfosRef](Type *T) {
1407 DeferredChecks.emplace_back(T, InfosRef);
1408 return false;
1411 IITDescriptor D = Infos.front();
1412 Infos = Infos.slice(1);
1414 switch (D.Kind) {
1415 case IITDescriptor::Void: return !Ty->isVoidTy();
1416 case IITDescriptor::VarArg: return true;
1417 case IITDescriptor::MMX: return !Ty->isX86_MMXTy();
1418 case IITDescriptor::AMX: return !Ty->isX86_AMXTy();
1419 case IITDescriptor::Token: return !Ty->isTokenTy();
1420 case IITDescriptor::Metadata: return !Ty->isMetadataTy();
1421 case IITDescriptor::Half: return !Ty->isHalfTy();
1422 case IITDescriptor::BFloat: return !Ty->isBFloatTy();
1423 case IITDescriptor::Float: return !Ty->isFloatTy();
1424 case IITDescriptor::Double: return !Ty->isDoubleTy();
1425 case IITDescriptor::Quad: return !Ty->isFP128Ty();
1426 case IITDescriptor::Integer: return !Ty->isIntegerTy(D.Integer_Width);
1427 case IITDescriptor::Vector: {
1428 VectorType *VT = dyn_cast<VectorType>(Ty);
1429 return !VT || VT->getElementCount() != D.Vector_Width ||
1430 matchIntrinsicType(VT->getElementType(), Infos, ArgTys,
1431 DeferredChecks, IsDeferredCheck);
1433 case IITDescriptor::Pointer: {
1434 PointerType *PT = dyn_cast<PointerType>(Ty);
1435 if (!PT || PT->getAddressSpace() != D.Pointer_AddressSpace)
1436 return true;
1437 if (!PT->isOpaque())
1438 return matchIntrinsicType(PT->getElementType(), Infos, ArgTys,
1439 DeferredChecks, IsDeferredCheck);
1440 // If typed pointers are supported, do not allow using opaque pointer in
1441 // place of fixed pointer type. This would make the intrinsic signature
1442 // non-unique.
1443 if (Ty->getContext().supportsTypedPointers())
1444 return true;
1445 // Consume IIT descriptors relating to the pointer element type.
1446 while (Infos.front().Kind == IITDescriptor::Pointer)
1447 Infos = Infos.slice(1);
1448 Infos = Infos.slice(1);
1449 return false;
1452 case IITDescriptor::Struct: {
1453 StructType *ST = dyn_cast<StructType>(Ty);
1454 if (!ST || ST->getNumElements() != D.Struct_NumElements)
1455 return true;
1457 for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
1458 if (matchIntrinsicType(ST->getElementType(i), Infos, ArgTys,
1459 DeferredChecks, IsDeferredCheck))
1460 return true;
1461 return false;
1464 case IITDescriptor::Argument:
1465 // If this is the second occurrence of an argument,
1466 // verify that the later instance matches the previous instance.
1467 if (D.getArgumentNumber() < ArgTys.size())
1468 return Ty != ArgTys[D.getArgumentNumber()];
1470 if (D.getArgumentNumber() > ArgTys.size() ||
1471 D.getArgumentKind() == IITDescriptor::AK_MatchType)
1472 return IsDeferredCheck || DeferCheck(Ty);
1474 assert(D.getArgumentNumber() == ArgTys.size() && !IsDeferredCheck &&
1475 "Table consistency error");
1476 ArgTys.push_back(Ty);
1478 switch (D.getArgumentKind()) {
1479 case IITDescriptor::AK_Any: return false; // Success
1480 case IITDescriptor::AK_AnyInteger: return !Ty->isIntOrIntVectorTy();
1481 case IITDescriptor::AK_AnyFloat: return !Ty->isFPOrFPVectorTy();
1482 case IITDescriptor::AK_AnyVector: return !isa<VectorType>(Ty);
1483 case IITDescriptor::AK_AnyPointer: return !isa<PointerType>(Ty);
1484 default: break;
1486 llvm_unreachable("all argument kinds not covered");
1488 case IITDescriptor::ExtendArgument: {
1489 // If this is a forward reference, defer the check for later.
1490 if (D.getArgumentNumber() >= ArgTys.size())
1491 return IsDeferredCheck || DeferCheck(Ty);
1493 Type *NewTy = ArgTys[D.getArgumentNumber()];
1494 if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
1495 NewTy = VectorType::getExtendedElementVectorType(VTy);
1496 else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
1497 NewTy = IntegerType::get(ITy->getContext(), 2 * ITy->getBitWidth());
1498 else
1499 return true;
1501 return Ty != NewTy;
1503 case IITDescriptor::TruncArgument: {
1504 // If this is a forward reference, defer the check for later.
1505 if (D.getArgumentNumber() >= ArgTys.size())
1506 return IsDeferredCheck || DeferCheck(Ty);
1508 Type *NewTy = ArgTys[D.getArgumentNumber()];
1509 if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
1510 NewTy = VectorType::getTruncatedElementVectorType(VTy);
1511 else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
1512 NewTy = IntegerType::get(ITy->getContext(), ITy->getBitWidth() / 2);
1513 else
1514 return true;
1516 return Ty != NewTy;
1518 case IITDescriptor::HalfVecArgument:
1519 // If this is a forward reference, defer the check for later.
1520 if (D.getArgumentNumber() >= ArgTys.size())
1521 return IsDeferredCheck || DeferCheck(Ty);
1522 return !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
1523 VectorType::getHalfElementsVectorType(
1524 cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
1525 case IITDescriptor::SameVecWidthArgument: {
1526 if (D.getArgumentNumber() >= ArgTys.size()) {
1527 // Defer check and subsequent check for the vector element type.
1528 Infos = Infos.slice(1);
1529 return IsDeferredCheck || DeferCheck(Ty);
1531 auto *ReferenceType = dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
1532 auto *ThisArgType = dyn_cast<VectorType>(Ty);
1533 // Both must be vectors of the same number of elements or neither.
1534 if ((ReferenceType != nullptr) != (ThisArgType != nullptr))
1535 return true;
1536 Type *EltTy = Ty;
1537 if (ThisArgType) {
1538 if (ReferenceType->getElementCount() !=
1539 ThisArgType->getElementCount())
1540 return true;
1541 EltTy = ThisArgType->getElementType();
1543 return matchIntrinsicType(EltTy, Infos, ArgTys, DeferredChecks,
1544 IsDeferredCheck);
1546 case IITDescriptor::PtrToArgument: {
1547 if (D.getArgumentNumber() >= ArgTys.size())
1548 return IsDeferredCheck || DeferCheck(Ty);
1549 Type * ReferenceType = ArgTys[D.getArgumentNumber()];
1550 PointerType *ThisArgType = dyn_cast<PointerType>(Ty);
1551 return (!ThisArgType || ThisArgType->getElementType() != ReferenceType);
1553 case IITDescriptor::PtrToElt: {
1554 if (D.getArgumentNumber() >= ArgTys.size())
1555 return IsDeferredCheck || DeferCheck(Ty);
1556 VectorType * ReferenceType =
1557 dyn_cast<VectorType> (ArgTys[D.getArgumentNumber()]);
1558 PointerType *ThisArgType = dyn_cast<PointerType>(Ty);
1560 if (!ThisArgType || !ReferenceType)
1561 return true;
1562 if (!ThisArgType->isOpaque())
1563 return ThisArgType->getElementType() != ReferenceType->getElementType();
1564 // If typed pointers are supported, do not allow opaque pointer to ensure
1565 // uniqueness.
1566 return Ty->getContext().supportsTypedPointers();
1568 case IITDescriptor::VecOfAnyPtrsToElt: {
1569 unsigned RefArgNumber = D.getRefArgNumber();
1570 if (RefArgNumber >= ArgTys.size()) {
1571 if (IsDeferredCheck)
1572 return true;
1573 // If this is a forward reference, add the pointer-vector type now and
1574 // defer the checks for later.
1575 ArgTys.push_back(Ty);
1576 return DeferCheck(Ty);
1579 if (!IsDeferredCheck){
1580 assert(D.getOverloadArgNumber() == ArgTys.size() &&
1581 "Table consistency error");
1582 ArgTys.push_back(Ty);
1585 // Verify the overloaded type "matches" the Ref type: Ty must be a vector
1586 // with the same width as Ref, composed of pointers to the same element
1587 // type as Ref.
1588 auto *ReferenceType = dyn_cast<VectorType>(ArgTys[RefArgNumber]);
1589 auto *ThisArgVecTy = dyn_cast<VectorType>(Ty);
1590 if (!ThisArgVecTy || !ReferenceType ||
1591 (ReferenceType->getElementCount() != ThisArgVecTy->getElementCount()))
1592 return true;
1593 PointerType *ThisArgEltTy =
1594 dyn_cast<PointerType>(ThisArgVecTy->getElementType());
1595 if (!ThisArgEltTy)
1596 return true;
1597 return !ThisArgEltTy->isOpaqueOrPointeeTypeMatches(
1598 ReferenceType->getElementType());
1600 case IITDescriptor::VecElementArgument: {
1601 if (D.getArgumentNumber() >= ArgTys.size())
1602 return IsDeferredCheck ? true : DeferCheck(Ty);
1603 auto *ReferenceType = dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
1604 return !ReferenceType || Ty != ReferenceType->getElementType();
1606 case IITDescriptor::Subdivide2Argument:
1607 case IITDescriptor::Subdivide4Argument: {
1608 // If this is a forward reference, defer the check for later.
1609 if (D.getArgumentNumber() >= ArgTys.size())
1610 return IsDeferredCheck || DeferCheck(Ty);
1612 Type *NewTy = ArgTys[D.getArgumentNumber()];
1613 if (auto *VTy = dyn_cast<VectorType>(NewTy)) {
1614 int SubDivs = D.Kind == IITDescriptor::Subdivide2Argument ? 1 : 2;
1615 NewTy = VectorType::getSubdividedVectorType(VTy, SubDivs);
1616 return Ty != NewTy;
1618 return true;
1620 case IITDescriptor::VecOfBitcastsToInt: {
1621 if (D.getArgumentNumber() >= ArgTys.size())
1622 return IsDeferredCheck || DeferCheck(Ty);
1623 auto *ReferenceType = dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
1624 auto *ThisArgVecTy = dyn_cast<VectorType>(Ty);
1625 if (!ThisArgVecTy || !ReferenceType)
1626 return true;
1627 return ThisArgVecTy != VectorType::getInteger(ReferenceType);
1630 llvm_unreachable("unhandled");

Intrinsic::MatchIntrinsicTypesResult
Intrinsic::matchIntrinsicSignature(FunctionType *FTy,
                                   ArrayRef<Intrinsic::IITDescriptor> &Infos,
                                   SmallVectorImpl<Type *> &ArgTys) {
  SmallVector<DeferredIntrinsicMatchPair, 2> DeferredChecks;
  if (matchIntrinsicType(FTy->getReturnType(), Infos, ArgTys, DeferredChecks,
                         false))
    return MatchIntrinsicTypes_NoMatchRet;

  unsigned NumDeferredReturnChecks = DeferredChecks.size();

  for (auto Ty : FTy->params())
    if (matchIntrinsicType(Ty, Infos, ArgTys, DeferredChecks, false))
      return MatchIntrinsicTypes_NoMatchArg;

  for (unsigned I = 0, E = DeferredChecks.size(); I != E; ++I) {
    DeferredIntrinsicMatchPair &Check = DeferredChecks[I];
    if (matchIntrinsicType(Check.first, Check.second, ArgTys, DeferredChecks,
                           true))
      return I < NumDeferredReturnChecks ? MatchIntrinsicTypes_NoMatchRet
                                         : MatchIntrinsicTypes_NoMatchArg;
  }

  return MatchIntrinsicTypes_Match;
}
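
// Illustrative usage sketch (an assumption about a typical caller, not code
// from this file): match a declaration's FunctionType against the intrinsic's
// IIT descriptor table, collecting the overloaded types along the way.
//
// \code
//   SmallVector<Intrinsic::IITDescriptor, 8> Table;
//   Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
//   ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
//   SmallVector<Type *, 4> OverloadTys;
//   if (Intrinsic::matchIntrinsicSignature(F->getFunctionType(), TableRef,
//                                          OverloadTys) !=
//       Intrinsic::MatchIntrinsicTypes_Match)
//     report_fatal_error("intrinsic declaration has the wrong prototype");
// \endcode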

bool
Intrinsic::matchIntrinsicVarArg(bool isVarArg,
                                ArrayRef<Intrinsic::IITDescriptor> &Infos) {
  // If there are no descriptors left, then it can't be a vararg.
  if (Infos.empty())
    return isVarArg;

  // There should be only one descriptor remaining at this point.
  if (Infos.size() != 1)
    return true;

  // Check and verify the descriptor.
  IITDescriptor D = Infos.front();
  Infos = Infos.slice(1);
  if (D.Kind == IITDescriptor::VarArg)
    return !isVarArg;

  return true;
}

bool Intrinsic::getIntrinsicSignature(Function *F,
                                      SmallVectorImpl<Type *> &ArgTys) {
  Intrinsic::ID ID = F->getIntrinsicID();
  if (!ID)
    return false;

  SmallVector<Intrinsic::IITDescriptor, 8> Table;
  getIntrinsicInfoTableEntries(ID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;

  if (Intrinsic::matchIntrinsicSignature(F->getFunctionType(), TableRef,
                                         ArgTys) !=
      Intrinsic::MatchIntrinsicTypesResult::MatchIntrinsicTypes_Match) {
    return false;
  }
  if (Intrinsic::matchIntrinsicVarArg(F->getFunctionType()->isVarArg(),
                                      TableRef))
    return false;
  return true;
}
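
// Illustrative usage sketch (assumed caller, not code from this file): recover
// the overloaded types of an intrinsic declaration, e.g. to recompute its
// mangled name.
//
// \code
//   SmallVector<Type *, 4> ArgTys;
//   if (Intrinsic::getIntrinsicSignature(F, ArgTys))
//     errs() << Intrinsic::getName(F->getIntrinsicID(), ArgTys, F->getParent(),
//                                  F->getFunctionType())
//            << "\n";
// \endcode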

Optional<Function *> Intrinsic::remangleIntrinsicFunction(Function *F) {
  SmallVector<Type *, 4> ArgTys;
  if (!getIntrinsicSignature(F, ArgTys))
    return None;

  Intrinsic::ID ID = F->getIntrinsicID();
  StringRef Name = F->getName();
  std::string WantedName =
      Intrinsic::getName(ID, ArgTys, F->getParent(), F->getFunctionType());
  if (Name == WantedName)
    return None;

  Function *NewDecl = [&] {
    if (auto *ExistingGV = F->getParent()->getNamedValue(WantedName)) {
      if (auto *ExistingF = dyn_cast<Function>(ExistingGV))
        if (ExistingF->getFunctionType() == F->getFunctionType())
          return ExistingF;

      // The name already exists, but is not a function or has the wrong
      // prototype. Make room for the new one by renaming the old version.
      // Either this old version will be removed later on or the module is
      // invalid and we'll get an error.
      ExistingGV->setName(WantedName + ".renamed");
    }
    return Intrinsic::getDeclaration(F->getParent(), ID, ArgTys);
  }();

  NewDecl->setCallingConv(F->getCallingConv());
  assert(NewDecl->getFunctionType() == F->getFunctionType() &&
         "Shouldn't change the signature");
  return NewDecl;
}
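
// Illustrative usage sketch (assumed caller, not code from this file): after an
// overloaded operand type has been rewritten, switch a call site over to the
// canonically mangled declaration.
//
// \code
//   if (Optional<Function *> Remangled = Intrinsic::remangleIntrinsicFunction(F))
//     CB->setCalledFunction(*Remangled); // CB is a CallBase that called F.
// \endcode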

/// hasAddressTaken - returns true if there are any uses of this function
/// other than direct calls or invokes to it. Optionally ignores callback
/// uses, assume-like pointer annotation calls, and references in llvm.used
/// and llvm.compiler.used variables.
bool Function::hasAddressTaken(const User **PutOffender,
                               bool IgnoreCallbackUses,
                               bool IgnoreAssumeLikeCalls,
                               bool IgnoreLLVMUsed) const {
  for (const Use &U : uses()) {
    const User *FU = U.getUser();
    if (isa<BlockAddress>(FU))
      continue;

    if (IgnoreCallbackUses) {
      AbstractCallSite ACS(&U);
      if (ACS && ACS.isCallbackCall())
        continue;
    }

    const auto *Call = dyn_cast<CallBase>(FU);
    if (!Call) {
      if (IgnoreAssumeLikeCalls) {
        if (const auto *FI = dyn_cast<Instruction>(FU)) {
          if (FI->isCast() && !FI->user_empty() &&
              llvm::all_of(FU->users(), [](const User *U) {
                if (const auto *I = dyn_cast<IntrinsicInst>(U))
                  return I->isAssumeLikeIntrinsic();
                return false;
              }))
            continue;
        }
      }

      if (IgnoreLLVMUsed && !FU->user_empty()) {
        const User *FUU = FU;
        if (isa<BitCastOperator>(FU) && FU->hasOneUse() &&
            !FU->user_begin()->user_empty())
          FUU = *FU->user_begin();
        if (llvm::all_of(FUU->users(), [](const User *U) {
              if (const auto *GV = dyn_cast<GlobalVariable>(U))
                return GV->hasName() &&
                       (GV->getName().equals("llvm.compiler.used") ||
                        GV->getName().equals("llvm.used"));
              return false;
            }))
          continue;
      }
      if (PutOffender)
        *PutOffender = FU;
      return true;
    }

    if (!Call->isCallee(&U)) {
      if (PutOffender)
        *PutOffender = FU;
      return true;
    }
  }
  return false;
}
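
// Illustrative usage sketch (assumed caller, not code from this file): only
// rewrite a function's signature when nothing still refers to it indirectly.
//
// \code
//   const User *Offender = nullptr;
//   if (!F->hasAddressTaken(&Offender, /*IgnoreCallbackUses=*/false,
//                           /*IgnoreAssumeLikeCalls=*/true,
//                           /*IgnoreLLVMUsed=*/true))
//     rewriteSignature(*F); // hypothetical transformation helper
//   else if (Offender)
//     errs() << "address taken by: " << *Offender << "\n";
// \endcode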

bool Function::isDefTriviallyDead() const {
  // Check the linkage
  if (!hasLinkOnceLinkage() && !hasLocalLinkage() &&
      !hasAvailableExternallyLinkage())
    return false;

  // Check if the function is used by anything other than a blockaddress.
  for (const User *U : users())
    if (!isa<BlockAddress>(U))
      return false;

  return true;
}

/// callsFunctionThatReturnsTwice - Return true if the function has a call to
/// setjmp or another function that gcc recognizes as "returning twice".
bool Function::callsFunctionThatReturnsTwice() const {
  for (const Instruction &I : instructions(this))
    if (const auto *Call = dyn_cast<CallBase>(&I))
      if (Call->hasFnAttr(Attribute::ReturnsTwice))
        return true;

  return false;
}
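
// Illustrative usage sketch (assumed caller, not code from this file): bail
// out of an optimization that cannot reason about setjmp-style control flow.
//
// \code
//   if (F->callsFunctionThatReturnsTwice())
//     return false; // conservatively skip this function
// \endcode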

Constant *Function::getPersonalityFn() const {
  assert(hasPersonalityFn() && getNumOperands());
  return cast<Constant>(Op<0>());
}

void Function::setPersonalityFn(Constant *Fn) {
  setHungoffOperand<0>(Fn);
  setValueSubclassDataBit(3, Fn != nullptr);
}

Constant *Function::getPrefixData() const {
  assert(hasPrefixData() && getNumOperands());
  return cast<Constant>(Op<1>());
}

void Function::setPrefixData(Constant *PrefixData) {
  setHungoffOperand<1>(PrefixData);
  setValueSubclassDataBit(1, PrefixData != nullptr);
}

Constant *Function::getPrologueData() const {
  assert(hasPrologueData() && getNumOperands());
  return cast<Constant>(Op<2>());
}

void Function::setPrologueData(Constant *PrologueData) {
  setHungoffOperand<2>(PrologueData);
  setValueSubclassDataBit(2, PrologueData != nullptr);
}
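
// Illustrative usage sketch (assumed caller, not code from this file): attach
// the Itanium C++ personality routine to a function that needs EH lowering.
//
// \code
//   FunctionCallee Personality = M.getOrInsertFunction(
//       "__gxx_personality_v0",
//       FunctionType::get(Type::getInt32Ty(Ctx), /*isVarArg=*/true));
//   F->setPersonalityFn(cast<Constant>(Personality.getCallee()));
//   assert(F->hasPersonalityFn());
// \endcode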

void Function::allocHungoffUselist() {
  // If we've already allocated a uselist, stop here.
  if (getNumOperands())
    return;

  allocHungoffUses(3, /*IsPhi=*/false);
  setNumHungOffUseOperands(3);

  // Initialize the uselist with placeholder operands to allow traversal.
  auto *CPN = ConstantPointerNull::get(Type::getInt1PtrTy(getContext(), 0));
  Op<0>().set(CPN);
  Op<1>().set(CPN);
  Op<2>().set(CPN);
}

template <int Idx>
void Function::setHungoffOperand(Constant *C) {
  if (C) {
    allocHungoffUselist();
    Op<Idx>().set(C);
  } else if (getNumOperands()) {
    Op<Idx>().set(
        ConstantPointerNull::get(Type::getInt1PtrTy(getContext(), 0)));
  }
}

void Function::setValueSubclassDataBit(unsigned Bit, bool On) {
  assert(Bit < 16 && "SubclassData contains only 16 bits");
  if (On)
    setValueSubclassData(getSubclassDataFromValue() | (1 << Bit));
  else
    setValueSubclassData(getSubclassDataFromValue() & ~(1 << Bit));
}

void Function::setEntryCount(ProfileCount Count,
                             const DenseSet<GlobalValue::GUID> *S) {
  assert(Count.hasValue());
#if !defined(NDEBUG)
  auto PrevCount = getEntryCount();
  assert(!PrevCount.hasValue() || PrevCount.getType() == Count.getType());
#endif

  auto ImportGUIDs = getImportGUIDs();
  if (S == nullptr && ImportGUIDs.size())
    S = &ImportGUIDs;

  MDBuilder MDB(getContext());
  setMetadata(
      LLVMContext::MD_prof,
      MDB.createFunctionEntryCount(Count.getCount(), Count.isSynthetic(), S));
}

void Function::setEntryCount(uint64_t Count, Function::ProfileCountType Type,
                             const DenseSet<GlobalValue::GUID> *Imports) {
  setEntryCount(ProfileCount(Count, Type), Imports);
}

ProfileCount Function::getEntryCount(bool AllowSynthetic) const {
  MDNode *MD = getMetadata(LLVMContext::MD_prof);
  if (MD && MD->getOperand(0))
    if (MDString *MDS = dyn_cast<MDString>(MD->getOperand(0))) {
      if (MDS->getString().equals("function_entry_count")) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(1));
        uint64_t Count = CI->getValue().getZExtValue();
        // A value of -1 is used for SamplePGO when there were no samples.
        // Treat this the same as unknown.
        if (Count == (uint64_t)-1)
          return ProfileCount::getInvalid();
        return ProfileCount(Count, PCT_Real);
      } else if (AllowSynthetic &&
                 MDS->getString().equals("synthetic_function_entry_count")) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(1));
        uint64_t Count = CI->getValue().getZExtValue();
        return ProfileCount(Count, PCT_Synthetic);
      }
    }
  return ProfileCount::getInvalid();
}
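
// Illustrative usage sketch (assumed caller, not code from this file):
// round-trip a PGO entry count through the !prof metadata managed above.
//
// \code
//   F->setEntryCount(1000, Function::PCT_Real);
//   Function::ProfileCount Count = F->getEntryCount();
//   if (Count.hasValue())
//     errs() << F->getName() << " entry count: " << Count.getCount() << "\n";
// \endcode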

DenseSet<GlobalValue::GUID> Function::getImportGUIDs() const {
  DenseSet<GlobalValue::GUID> R;
  if (MDNode *MD = getMetadata(LLVMContext::MD_prof))
    if (MDString *MDS = dyn_cast<MDString>(MD->getOperand(0)))
      if (MDS->getString().equals("function_entry_count"))
        for (unsigned i = 2; i < MD->getNumOperands(); i++)
          R.insert(mdconst::extract<ConstantInt>(MD->getOperand(i))
                       ->getValue()
                       .getZExtValue());
  return R;
}

void Function::setSectionPrefix(StringRef Prefix) {
  MDBuilder MDB(getContext());
  setMetadata(LLVMContext::MD_section_prefix,
              MDB.createFunctionSectionPrefix(Prefix));
}

Optional<StringRef> Function::getSectionPrefix() const {
  if (MDNode *MD = getMetadata(LLVMContext::MD_section_prefix)) {
    assert(cast<MDString>(MD->getOperand(0))
               ->getString()
               .equals("function_section_prefix") &&
           "Metadata does not match");
    return cast<MDString>(MD->getOperand(1))->getString();
  }
  return None;
}
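
// Illustrative usage sketch (assumed caller and prefix string, not code from
// this file): tag a function as hot so code layout can group it with other
// hot functions.
//
// \code
//   F->setSectionPrefix("hot");
//   if (Optional<StringRef> Prefix = F->getSectionPrefix())
//     assert(Prefix->equals("hot"));
// \endcode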

bool Function::nullPointerIsDefined() const {
  return hasFnAttribute(Attribute::NullPointerIsValid);
}

bool llvm::NullPointerIsDefined(const Function *F, unsigned AS) {
  if (F && F->nullPointerIsDefined())
    return true;

  if (AS != 0)
    return true;

  return false;
}
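
// Illustrative usage sketch (assumed caller, not code from this file): decide
// whether dereferencing a null pointer in a given address space is undefined
// behaviour and therefore foldable.
//
// \code
//   unsigned AS = LI->getPointerAddressSpace(); // LI is some LoadInst.
//   if (!NullPointerIsDefined(LI->getFunction(), AS)) {
//     // Null is not a valid address here; a load from null may be folded.
//   }
// \endcode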