//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using namespace llvm;

/// CreateGlobalString - Make a new global variable with an initializer that
/// has array of i8 type filled in with the nul terminated string value
/// specified. If Name is specified, it is the name of the global variable
/// created.
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
                                                  const Twine &Name,
                                                  unsigned AddressSpace,
                                                  Module *M) {
  Constant *StrConstant = ConstantDataArray::getString(Context, Str);
  if (!M)
    M = BB->getParent()->getParent();
  auto *GV = new GlobalVariable(
      *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
      StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal,
      AddressSpace);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(Align(1));
  return GV;
}

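// Example (illustrative sketch, not part of the upstream sources): emitting a
// private nul-terminated string global through a builder. `B` and `EntryBB`
// are hypothetical names.
//
//   IRBuilder<> B(EntryBB);
//   GlobalVariable *Msg = B.CreateGlobalString("hello world", "msg");
//   // Msg is a [12 x i8] constant global with unnamed_addr and align 1.
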
Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}

DebugLoc IRBuilderBase::getCurrentDebugLocation() const {
  for (auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg)
      return {cast<DILocation>(KV.second)};

  return {};
}

void IRBuilderBase::SetInstDebugLocation(Instruction *I) const {
  for (const auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg) {
      I->setDebugLoc(DebugLoc(KV.second));
      return;
    }
}

CallInst *
IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                const Twine &Name, Instruction *FMFSource,
                                ArrayRef<OperandBundleDef> OpBundles) {
  CallInst *CI = CreateCall(Callee, Ops, OpBundles, Name);
  if (FMFSource)
    CI->copyFastMathFlags(FMFSource);
  return CI;
}

Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
  assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  if (cast<ConstantInt>(Scaling)->isZero())
    return Scaling;
  Module *M = GetInsertBlock()->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
  CallInst *CI = CreateCall(TheFn, {}, {}, Name);
  return cast<ConstantInt>(Scaling)->isOne() ? CI : CreateMul(CI, Scaling);
}

Value *IRBuilderBase::CreateElementCount(Type *DstType, ElementCount EC) {
  Constant *MinEC = ConstantInt::get(DstType, EC.getKnownMinValue());
  return EC.isScalable() ? CreateVScale(MinEC) : MinEC;
}

Value *IRBuilderBase::CreateTypeSize(Type *DstType, TypeSize Size) {
  Constant *MinSize = ConstantInt::get(DstType, Size.getKnownMinValue());
  return Size.isScalable() ? CreateVScale(MinSize) : MinSize;
}

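// Example (illustrative sketch): materializing the element count of a vector
// type as an i64 value. For a fixed <4 x i32> this folds to `i64 4`; for a
// scalable <vscale x 4 x i32> it becomes `4 * @llvm.vscale.i64()`. `B` and
// `VecTy` are hypothetical.
//
//   Value *N = B.CreateElementCount(B.getInt64Ty(), VecTy->getElementCount());
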
Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  Type *STy = DstType->getScalarType();
  if (isa<ScalableVectorType>(DstType)) {
    Type *StepVecType = DstType;
    // TODO: We expect this special case (element type < 8 bits) to be
    // temporary - once the intrinsic properly supports < 8 bits this code
    // can be removed.
    if (STy->getScalarSizeInBits() < 8)
      StepVecType =
          VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
    Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector,
                                 {StepVecType}, {}, nullptr, Name);
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();

  // Create a vector of consecutive numbers from zero to VF.
  SmallVector<Constant *, 8> Indices;
  for (unsigned i = 0; i < NumEls; ++i)
    Indices.push_back(ConstantInt::get(STy, i));

  // Add the consecutive indices to the vector value.
  return ConstantVector::get(Indices);
}

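// Example (illustrative sketch): for a fixed-width request the step vector is
// folded to a constant and no intrinsic call is emitted. `B` is hypothetical.
//
//   Value *Step =
//       B.CreateStepVector(FixedVectorType::get(B.getInt32Ty(), 4));
//   // Step == <i32 0, i32 1, i32 2, i32 3>
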
CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = { Ptr->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(*Align);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

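// Example (illustrative sketch): zero-filling a 64-byte buffer with a known
// 16-byte alignment. `B` and `Buf` are hypothetical.
//
//   B.CreateMemSet(Buf, B.getInt8(0), B.getInt64(64), MaybeAlign(16));
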
CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
                                            Value *Val, Value *Size,
                                            bool IsVolatile, MDNode *TBAATag,
                                            MDNode *ScopeTag,
                                            MDNode *NoAliasTag) {
  Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset_inline, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  if (DstAlign)
    cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateMemTransferInst(
    Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
    MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert((IntrID == Intrinsic::memcpy || IntrID == Intrinsic::memcpy_inline ||
          IntrID == Intrinsic::memmove) &&
         "Unexpected intrinsic ID");
  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  auto *MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

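// Example (illustrative sketch): the usual entry point is the CreateMemCpy
// wrapper declared in IRBuilder.h, which forwards here with
// Intrinsic::memcpy. `B`, `Dst`, and `Src` are hypothetical.
//
//   B.CreateMemCpy(Dst, MaybeAlign(8), Src, MaybeAlign(8), B.getInt64(32));
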
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

/// isConstantOne - Return true only if val is constant int 1
static bool isConstantOne(const Value *Val) {
  assert(Val && "isConstantOne does not work with nullptr Val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(Val);
  return CVal && CVal->isOne();
}

CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy,
                                      Value *AllocSize, Value *ArraySize,
                                      ArrayRef<OperandBundleDef> OpB,
                                      Function *MallocF, const Twine &Name) {
  // malloc(type) becomes:
  //       i8* malloc(typeSize)
  // malloc(type, arraySize) becomes:
  //       i8* malloc(typeSize*arraySize)
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy)
    ArraySize = CreateIntCast(ArraySize, IntPtrTy, false);

  if (!isConstantOne(ArraySize)) {
    if (isConstantOne(AllocSize)) {
      AllocSize = ArraySize; // Operand * 1 = Operand
    } else {
      // Multiply type size by the array size...
      AllocSize = CreateMul(ArraySize, AllocSize, "mallocsize");
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  Module *M = BB->getParent()->getParent();
  Type *BPTy = PointerType::getUnqual(Context);
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  CallInst *MCall = CreateCall(MallocFunc, AllocSize, OpB, Name);

  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    F->setReturnDoesNotAlias();
  }

  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return MCall;
}

CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy,
                                      Value *AllocSize, Value *ArraySize,
                                      Function *MallocF, const Twine &Name) {
  return CreateMalloc(IntPtrTy, AllocTy, AllocSize, ArraySize, std::nullopt,
                      MallocF, Name);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
CallInst *IRBuilderBase::CreateFree(Value *Source,
                                    ArrayRef<OperandBundleDef> Bundles) {
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *VoidPtrTy = PointerType::getUnqual(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, VoidPtrTy);
  CallInst *Result = CreateCall(FreeFunc, Source, Bundles, "");
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}

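// Example (illustrative sketch): pairing CreateMalloc with CreateFree. The
// names `B`, `DL`, and `PayloadTy` are hypothetical.
//
//   Type *IntPtrTy = DL.getIntPtrType(B.getContext());
//   Value *Sz = ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(PayloadTy));
//   CallInst *Obj = B.CreateMalloc(IntPtrTy, PayloadTy, Sz,
//                                  /*ArraySize=*/nullptr,
//                                  /*MallocF=*/nullptr, "obj");
//   // ... use Obj ...
//   B.CreateFree(Obj);
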
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memmove_element_unordered_atomic, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  // Set the alignment of the pointer args.
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Src};
  Type *Tys[] = { Src->getType() };
  auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
  return CreateCall(Decl, Ops);
}

CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
                                        {Src->getType()});
  return CreateCall(Decl, Ops);
}

CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
                                        {Src->getType()});
  return CreateCall(Decl, Ops);
}

CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
}

CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);
}

CallInst *IRBuilderBase::CreateFPMaximumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmaximum, Src);
}

CallInst *IRBuilderBase::CreateFPMinimumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fminimum, Src);
}

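// Example (illustrative sketch): reducing a <4 x i32> to a scalar sum, and an
// ordered float reduction seeded with an explicit accumulator. `B`, `VecI32`,
// and `VecF32` are hypothetical.
//
//   Value *Sum = B.CreateAddReduce(VecI32);
//   Value *FSum = B.CreateFAddReduce(
//       ConstantFP::get(B.getFloatTy(), 0.0), VecF32);
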
CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
  return CreateCall(TheFn, Ops);
}

CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
  return CreateCall(TheFn, Ops);
}

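// Example (illustrative sketch): bracketing a 64-byte alloca with lifetime
// markers so stack coloring may reuse its slot. `B` is hypothetical.
//
//   AllocaInst *Tmp = B.CreateAlloca(B.getInt8Ty(), B.getInt64(64));
//   B.CreateLifetimeStart(Tmp, B.getInt64(64));
//   // ... uses of Tmp ...
//   B.CreateLifetimeEnd(Tmp, B.getInt64(64));
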
CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  return CreateCall(TheFn, Ops);
}

static MaybeAlign getAlign(Value *Ptr) {
  if (auto *O = dyn_cast<GlobalObject>(Ptr))
    return O->getAlign();
  if (auto *A = dyn_cast<GlobalAlias>(Ptr))
    return A->getAliaseeObject()->getAlign();
  return {};
}

CallInst *IRBuilderBase::CreateThreadLocalAddress(Value *Ptr) {
  assert(isa<GlobalValue>(Ptr) && cast<GlobalValue>(Ptr)->isThreadLocal() &&
         "threadlocal_address only applies to thread local variables.");
  CallInst *CI = CreateIntrinsic(llvm::Intrinsic::threadlocal_address,
                                 {Ptr->getType()}, {Ptr});
  if (MaybeAlign A = getAlign(Ptr)) {
    CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *A));
    CI->addRetAttr(Attribute::getWithAlignment(CI->getContext(), *A));
  }
  return CI;
}

CallInst *
IRBuilderBase::CreateAssumption(Value *Cond,
                                ArrayRef<OperandBundleDef> OpBundles) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = { Cond };
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  return CreateCall(FnAssume, Ops, OpBundles);
}

Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  Module *M = BB->getModule();
  auto *FnIntrinsic = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_noalias_scope_decl, {});
  return CreateCall(FnIntrinsic, {Scope});
}

/// Create a call to a Masked Load intrinsic.
/// \p Ty        - vector type to load
/// \p Ptr       - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = { Ty, PtrTy };
  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}

/// Create a call to a Masked Store intrinsic.
/// \p Val       - data to be stored,
/// \p Ptr       - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           Align Alignment, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}

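// Example (illustrative sketch): a masked load whose inactive lanes come back
// as poison, stored out through the same mask. `B`, `P`, `Q`, and `Mask` (a
// <4 x i1> value) are hypothetical.
//
//   auto *VTy = FixedVectorType::get(B.getInt32Ty(), 4);
//   Value *Ld = B.CreateMaskedLoad(VTy, P, Align(16), Mask);
//   B.CreateMaskedStore(Ld, Q, Align(16), Mask);
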
/// Create a call to a Masked intrinsic, with given intrinsic Id,
/// an array of operands - Ops, and an array of overloaded types -
/// OverloadedTypes.
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                               ArrayRef<Value *> Ops,
                                               ArrayRef<Type *> OverloadedTypes,
                                               const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
  return CreateCall(TheFn, Ops, {}, Name);
}

/// Create a call to a Masked Gather intrinsic.
/// \p Ty       - vector type to gather
/// \p Ptrs     - vector of pointers for loading
/// \p Align    - alignment for one element
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
                                            Align Alignment, Value *Mask,
                                            Value *PassThru,
                                            const Twine &Name) {
  auto *VecTy = cast<VectorType>(Ty);
  ElementCount NumElts = VecTy->getElementCount();
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");

  if (!Mask)
    Mask = getAllOnesMask(NumElts);

  if (!PassThru)
    PassThru = PoisonValue::get(Ty);

  Type *OverloadedTypes[] = {Ty, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
                               Name);
}

/// Create a call to a Masked Scatter intrinsic.
/// \p Data  - data to be stored,
/// \p Ptrs  - the vector of pointers, where the \p Data elements should be
///            stored
/// \p Align - alignment for one element
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());
  ElementCount NumElts = PtrsTy->getElementCount();

  if (!Mask)
    Mask = getAllOnesMask(NumElts);

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}

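// Example (illustrative sketch): gathering four i32 values through a vector
// of pointers; passing a null Mask selects the all-ones mask. `B` and
// `PtrVec` are hypothetical.
//
//   auto *VTy = FixedVectorType::get(B.getInt32Ty(), 4);
//   Value *G = B.CreateMaskedGather(VTy, PtrVec, Align(4), /*Mask=*/nullptr);
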
/// Create a call to Masked Expand Load intrinsic
/// \p Ty       - vector type to load
/// \p Ptr      - base pointer for the load
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
                                                Value *Mask, Value *PassThru,
                                                const Twine &Name) {
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = {Ty};
  Value *Ops[] = {Ptr, Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
                               OverloadedTypes, Name);
}

/// Create a call to Masked Compress Store intrinsic
/// \p Val  - data to be stored,
/// \p Ptr  - base pointer for the store
/// \p Mask - vector of booleans which indicates what vector lanes should
///           be accessed in memory
CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr,
                                                   Value *Mask) {
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy};
  Value *Ops[] = {Val, Ptr, Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
                               OverloadedTypes);
}

template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle
  return Args;
}

template<typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,
                     std::optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value *, 16> DeoptValues;
    llvm::append_range(DeoptValues, *DeoptArgs);
    Rval.emplace_back("deopt", DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value *, 16> TransitionValues;
    llvm::append_range(TransitionValues, *TransitionArgs);
    Rval.emplace_back("gc-transition", TransitionValues);
  }
  if (GCArgs.size()) {
    SmallVector<Value *, 16> LiveValues;
    llvm::append_range(LiveValues, GCArgs);
    Rval.emplace_back("gc-live", LiveValues);
  }
  return Rval;
}

template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualCallee.getCallee()->getType()});

  std::vector<Value *> Args = getStatepointArgs(
      *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);

  CallInst *CI = Builder->CreateCall(
      FnStatepoint, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  CI->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualCallee.getFunctionType()));
  return CI;
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Value *> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

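// Example (illustrative sketch): wrapping a call in a statepoint with one GC
// pointer to relocate and no deopt state. 2882400000 (0xABCDEF00) is the
// conventional default statepoint ID; `B`, `Callee`, `X`, and `P` are
// hypothetical.
//
//   CallInst *SP = B.CreateGCStatepointCall(
//       /*ID=*/2882400000, /*NumPatchBytes=*/0, Callee,
//       /*CallArgs=*/{X}, /*DeoptArgs=*/std::nullopt, /*GCArgs=*/{P}, "sp");
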
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    uint32_t Flags, ArrayRef<Value *> CallArgs,
    std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Use> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
}

template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualInvokee, BasicBlock *NormalDest,
    BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualInvokee.getCallee()->getType()});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
                        Flags, InvokeArgs);

  InvokeInst *II = Builder->CreateInvoke(
      FnStatepoint, NormalDest, UnwindDest, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  II->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualInvokee.getFunctionType()));
  return II;
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs,
      std::nullopt /* No Transition Args*/, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    std::optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, std::nullopt, DeoptArgs,
      GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                        Type *ResultType, const Twine &Name) {
  Intrinsic::ID ID = Intrinsic::experimental_gc_result;
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);

  Value *Args[] = {Statepoint};
  return CreateCall(FnGCResult, Args, {}, Name);
}

CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset, int DerivedOffset,
                                          Type *ResultType, const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCRelocate =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);

  Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
  return CreateCall(FnGCRelocate, Args, {}, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCFindBase = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
  return CreateCall(FnGCFindBase, {DerivedPtr}, {}, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                  const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCGetOffset = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
  return CreateCall(FnGCGetOffset, {DerivedPtr}, {}, Name);
}

CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              Instruction *FMFSource,
                                              const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
  return createCallHelper(Fn, {V}, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                               Value *RHS,
                                               Instruction *FMFSource,
                                               const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
  return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
}

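// Example (illustrative sketch): emitting llvm.smax.i32 on two values. `B`,
// `A0`, and `A1` are hypothetical.
//
//   Value *Max = B.CreateBinaryIntrinsic(Intrinsic::smax, A0, A1, nullptr,
//                                        "max");
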
CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
  return createCallHelper(Fn, Args, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();

  SmallVector<Intrinsic::IITDescriptor> Table;
  Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef(Table);

  SmallVector<Type *> ArgTys;
  ArgTys.reserve(Args.size());
  for (auto *I : Args)
    ArgTys.push_back(I->getType());
  FunctionType *FTy = FunctionType::get(RetTy, ArgTys, false);
  SmallVector<Type *> OverloadTys;
  Intrinsic::MatchIntrinsicTypesResult Res =
      matchIntrinsicSignature(FTy, TableRef, OverloadTys);
  (void)Res;
  assert(Res == Intrinsic::MatchIntrinsicTypes_Match && TableRef.empty() &&
         "Wrong types for intrinsic!");
  // TODO: Handle varargs intrinsics.

  Function *Fn = Intrinsic::getDeclaration(M, ID, OverloadTys);
  return createCallHelper(Fn, Args, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

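// Example (illustrative sketch): a strict fadd that neither assumes the
// default rounding mode nor ignores FP exceptions. `B`, `A`, and `C2` are
// hypothetical.
//
//   Value *S = B.CreateConstrainedFPBinOp(
//       Intrinsic::experimental_constrained_fadd, A, C2, nullptr, "t",
//       nullptr, RoundingMode::NearestTiesToEven, fp::ebStrict);
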
CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C =
      CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
                       Ops[0], Ops[1], Name, FPMathTag);
  }
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
                      Ops[0], Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
}

CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;
  bool HasRoundingMD = false;
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateFCmpHelper(
    CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
    MDNode *FPMathTag, bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateFCmp(P, LC, RC), Name);
  return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}

CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, std::optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, PredicateV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  append_range(UseArgs, Args);
  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  if (auto *V = Folder.FoldSelect(C, True, False))
    return V;

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
  return Insert(Sel, Name);
}

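// Example (illustrative sketch): the folder collapses selects whose operands
// are all constant, so the second call emits no instruction. `B`, `Cond`,
// `X`, and `Y` are hypothetical.
//
//   Value *R = B.CreateSelect(Cond, X, Y, "r");
//   Value *K = B.CreateSelect(B.getTrue(), B.getInt32(1), B.getInt32(2));
//   // K folds to the constant i32 1.
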
Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  Value *Difference = CreateSub(LHS_int, RHS_int);
  return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy),
                         Name);
}

Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  auto *PtrType = Ptr->getType();
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::launder_invariant_group, {PtrType});

  assert(FnLaunderInvariantGroup->getReturnType() == PtrType &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "LaunderInvariantGroup should take and return the same type");

  return CreateCall(FnLaunderInvariantGroup, {Ptr});
}

Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  auto *PtrType = Ptr->getType();
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::strip_invariant_group, {PtrType});

  assert(FnStripInvariantGroup->getReturnType() == PtrType &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "StripInvariantGroup should take and return the same type");

  return CreateCall(FnStripInvariantGroup, {Ptr});
}

Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  auto *Ty = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(Ty)) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_reverse, Ty);
    return Insert(CallInst::Create(F, V), Name);
  }
  // Keep the original behaviour for fixed vector
  SmallVector<int, 8> ShuffleMask;
  int NumElts = Ty->getElementCount().getKnownMinValue();
  for (int i = 0; i < NumElts; ++i)
    ShuffleMask.push_back(NumElts - i - 1);
  return CreateShuffleVector(V, ShuffleMask, Name);
}

Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                                         const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(Imm)};
    return Insert(CallInst::Create(F, Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
         "Invalid immediate for vector splice!");

  // Keep the original behaviour for fixed vector
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);

  return CreateShuffleVector(V1, V2, Mask);
}

Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  auto EC = ElementCount::getFixed(NumElts);
  return CreateVectorSplat(EC, V, Name);
}

Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  // First insert it into a poison vector so we can shuffle it.
  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  V = CreateInsertElement(Poison, V, getInt64(0), Name + ".splatinsert");

  // Shuffle the value across the desired number of elements.
  SmallVector<int, 16> Zeros;
  Zeros.resize(EC.getKnownMinValue());
  return CreateShuffleVector(V, Zeros, Name + ".splat");
}

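// Example (illustrative sketch): splatting a scalar across fixed and scalable
// vectors via the insertelement + shufflevector idiom above. `B` and `X` are
// hypothetical.
//
//   Value *S4 = B.CreateVectorSplat(4, B.getInt32(7)); // <4 x i32> of 7s
//   Value *SV = B.CreateVectorSplat(ElementCount::getScalable(2), X);
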
Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index.");

  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType = GetElementPtrInst::getGEPReturnType(Base, IdxList);

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn =
      CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index.");

  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(Base, {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
                            {Base, GEPIndex, DIIndex});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::createIsFPClass(Value *FPNum, unsigned Test) {
  ConstantInt *TestV = getInt32(Test);
  Module *M = BB->getParent()->getParent();
  Function *FnIsFPClass =
      Intrinsic::getDeclaration(M, Intrinsic::is_fpclass, {FPNum->getType()});
  return CreateCall(FnIsFPClass, {FPNum, TestV});
}

CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                                         Value *PtrValue,
                                                         Value *AlignValue,
                                                         Value *OffsetValue) {
  SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
  if (OffsetValue)
    Vals.push_back(OffsetValue);
  OperandBundleDefT<Value *> AlignOpB("align", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   unsigned Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
}

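// Example (illustrative sketch): asserting that a pointer is 32-byte aligned;
// this emits llvm.assume with an "align" operand bundle rather than the old
// ptrtoint/and/icmp pattern. `B`, `DL`, and `PtrV` are hypothetical.
//
//   B.CreateAlignmentAssumption(DL, PtrV, 32);
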
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}

IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
IRBuilderFolder::~IRBuilderFolder() = default;
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}