//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using namespace llvm;
/// CreateGlobalString - Make a new global variable with an initializer that
/// has an array of i8 type filled in with the nul-terminated string value
/// specified. If Name is specified, it is the name of the global variable
/// created.
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
                                                  const Twine &Name,
                                                  unsigned AddressSpace,
                                                  Module *M) {
  Constant *StrConstant = ConstantDataArray::getString(Context, Str);
  if (!M)
    M = BB->getParent()->getParent();
  auto *GV = new GlobalVariable(
      *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
      StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal,
      AddressSpace);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(Align(1));
  return GV;
}
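// Illustrative usage sketch, not part of this file; names like B, Entry and
// PrintfCallee are hypothetical. CreateGlobalString materializes a private
// unnamed_addr global holding the string:
//
//   IRBuilder<> B(Entry);
//   GlobalVariable *Msg = B.CreateGlobalString("hello\n", "msg");
//   B.CreateCall(PrintfCallee, {Msg});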
Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}

DebugLoc IRBuilderBase::getCurrentDebugLocation() const {
  for (auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg)
      return {cast<DILocation>(KV.second)};

  return {};
}

void IRBuilderBase::SetInstDebugLocation(Instruction *I) const {
  for (const auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg) {
      I->setDebugLoc(DebugLoc(KV.second));
      return;
    }
}
CallInst *
IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                const Twine &Name, Instruction *FMFSource,
                                ArrayRef<OperandBundleDef> OpBundles) {
  CallInst *CI = CreateCall(Callee, Ops, OpBundles, Name);
  if (FMFSource)
    CI->copyFastMathFlags(FMFSource);
  return CI;
}

Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
  assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  if (cast<ConstantInt>(Scaling)->isZero())
    return Scaling;
  Module *M = GetInsertBlock()->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
  CallInst *CI = CreateCall(TheFn, {}, {}, Name);
  return cast<ConstantInt>(Scaling)->isOne() ? CI : CreateMul(CI, Scaling);
}
Value *IRBuilderBase::CreateElementCount(Type *DstType, ElementCount EC) {
  Constant *MinEC = ConstantInt::get(DstType, EC.getKnownMinValue());
  return EC.isScalable() ? CreateVScale(MinEC) : MinEC;
}
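// Illustrative sketch (hypothetical builder B): for a scalable type such as
// <vscale x 4 x i32> this emits vscale scaled by the minimum element count;
// for a fixed-width type it folds to a plain constant.
//
//   auto *VTy = ScalableVectorType::get(B.getInt32Ty(), 4);
//   Value *N = B.CreateElementCount(B.getInt64Ty(), VTy->getElementCount());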
Value *IRBuilderBase::CreateTypeSize(Type *DstType, TypeSize Size) {
  Constant *MinSize = ConstantInt::get(DstType, Size.getKnownMinValue());
  return Size.isScalable() ? CreateVScale(MinSize) : MinSize;
}

Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  Type *STy = DstType->getScalarType();
  if (isa<ScalableVectorType>(DstType)) {
    Type *StepVecType = DstType;
    // TODO: We expect this special case (element type < 8 bits) to be
    // temporary - once the intrinsic properly supports < 8 bits this code
    // can be removed.
    if (STy->getScalarSizeInBits() < 8)
      StepVecType =
          VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
    Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector,
                                 {StepVecType}, {}, nullptr, Name);
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();

  // Create a vector of consecutive numbers from zero to VF.
  SmallVector<Constant *, 8> Indices;
  for (unsigned i = 0; i < NumEls; ++i)
    Indices.push_back(ConstantInt::get(STy, i));

  // Add the consecutive indices to the vector value.
  return ConstantVector::get(Indices);
}
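// Illustrative sketch (hypothetical builder B): a step vector is the
// canonical <0, 1, 2, ...> lane-index sequence, e.g. for vectorized
// induction variables.
//
//   Value *Step =
//       B.CreateStepVector(FixedVectorType::get(B.getInt32Ty(), 4)); // <0,1,2,3>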
CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = { Ptr->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(*Align);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
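// Illustrative sketch (hypothetical buffer pointer Buf): zero-fill 64 bytes
// through the memset intrinsic with a known destination alignment.
//
//   B.CreateMemSet(Buf, B.getInt8(0), B.getInt64(64), MaybeAlign(16));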
CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
                                            Value *Val, Value *Size,
                                            bool IsVolatile, MDNode *TBAATag,
                                            MDNode *ScopeTag,
                                            MDNode *NoAliasTag) {
  Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::memset_inline, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  if (DstAlign)
    cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {

  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
CallInst *IRBuilderBase::CreateMemTransferInst(
    Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
    MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert((IntrID == Intrinsic::memcpy || IntrID == Intrinsic::memcpy_inline ||
          IntrID == Intrinsic::memmove) &&
         "Unexpected intrinsic ID");
  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  auto* MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
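// Illustrative sketch (hypothetical Dst/Src/Len values): the CreateMemCpy
// and CreateMemMove wrappers declared in IRBuilder.h funnel into
// CreateMemTransferInst with the corresponding intrinsic ID.
//
//   B.CreateMemCpy(Dst, MaybeAlign(8), Src, MaybeAlign(8), Len);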
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
/// isConstantOne - Return true only if val is constant int 1
static bool isConstantOne(const Value *Val) {
  assert(Val && "isConstantOne does not work with nullptr Val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(Val);
  return CVal && CVal->isOne();
}

CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy,
                                      Value *AllocSize, Value *ArraySize,
                                      ArrayRef<OperandBundleDef> OpB,
                                      Function *MallocF, const Twine &Name) {
  // malloc(type) becomes:
  //       i8* malloc(typeSize)
  // malloc(type, arraySize) becomes:
  //       i8* malloc(typeSize*arraySize)
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy)
    ArraySize = CreateIntCast(ArraySize, IntPtrTy, false);

  if (!isConstantOne(ArraySize)) {
    if (isConstantOne(AllocSize)) {
      AllocSize = ArraySize; // Operand * 1 = Operand
    } else {
      // Multiply type size by the array size...
      AllocSize = CreateMul(ArraySize, AllocSize, "mallocsize");
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  Module *M = BB->getParent()->getParent();
  Type *BPTy = PointerType::getUnqual(Context);
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  CallInst *MCall = CreateCall(MallocFunc, AllocSize, OpB, Name);

  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    F->setReturnDoesNotAlias();
  }

  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return MCall;
}

CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy,
                                      Value *AllocSize, Value *ArraySize,
                                      Function *MallocF, const Twine &Name) {

  return CreateMalloc(IntPtrTy, AllocTy, AllocSize, ArraySize, std::nullopt,
                      MallocF, Name);
}
/// CreateFree - Generate the IR for a call to the builtin free function.
CallInst *IRBuilderBase::CreateFree(Value *Source,
                                    ArrayRef<OperandBundleDef> Bundles) {
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *VoidPtrTy = PointerType::getUnqual(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, VoidPtrTy);
  CallInst *Result = CreateCall(FreeFunc, Source, Bundles, "");
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}
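// Illustrative sketch (hypothetical builder B, assuming a 64-bit size_t):
// pairing CreateMalloc with CreateFree.
//
//   Type *IntPtrTy = B.getInt64Ty();
//   Value *P = B.CreateMalloc(IntPtrTy, B.getInt8Ty(),
//                             /*AllocSize=*/ConstantInt::get(IntPtrTy, 32),
//                             /*ArraySize=*/nullptr, /*MallocF=*/nullptr, "p");
//   B.CreateFree(P);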
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memmove_element_unordered_atomic, Tys);

  CallInst *CI = CreateCall(TheFn, Ops);

  // Set the alignment of the pointer args.
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Src};
  Type *Tys[] = { Src->getType() };
  auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
  return CreateCall(Decl, Ops);
}

CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
                                        {Src->getType()});
  return CreateCall(Decl, Ops);
}

CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
                                        {Src->getType()});
  return CreateCall(Decl, Ops);
}

CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
}

CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);
}

CallInst *IRBuilderBase::CreateFPMaximumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmaximum, Src);
}

CallInst *IRBuilderBase::CreateFPMinimumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fminimum, Src);
}
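// Illustrative sketch (hypothetical Vec of type <4 x i32>): horizontal
// reductions lower to the llvm.vector.reduce.* intrinsic family.
//
//   Value *Sum = B.CreateAddReduce(Vec);                       // ...reduce.add
//   Value *Max = B.CreateIntMaxReduce(Vec, /*IsSigned=*/true); // ...reduce.smax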
CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
  return CreateCall(TheFn, Ops);
}

CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
  return CreateCall(TheFn, Ops);
}
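// Illustrative sketch (hypothetical 16-byte alloca A): lifetime markers
// bracket the live range of a stack object so its slot can be reused.
//
//   B.CreateLifetimeStart(A, B.getInt64(16));
//   // ... uses of A ...
//   B.CreateLifetimeEnd(A, B.getInt64(16));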
CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {

  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  return CreateCall(TheFn, Ops);
}
static MaybeAlign getAlign(Value *Ptr) {
  if (auto *O = dyn_cast<GlobalObject>(Ptr))
    return O->getAlign();
  if (auto *A = dyn_cast<GlobalAlias>(Ptr))
    return A->getAliaseeObject()->getAlign();
  return {};
}
CallInst *IRBuilderBase::CreateThreadLocalAddress(Value *Ptr) {
#ifndef NDEBUG
  // Handle constexpr casts specially. These were possible when opaque
  // pointers were not enabled, since a constant cast could be sunk directly
  // into the use by design. This special case can be removed once the abuse
  // of constexpr is eliminated.
  auto *V = Ptr;
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast)
      V = CE->getOperand(0);

  assert(isa<GlobalValue>(V) && cast<GlobalValue>(V)->isThreadLocal() &&
         "threadlocal_address only applies to thread local variables.");
#endif
  CallInst *CI = CreateIntrinsic(llvm::Intrinsic::threadlocal_address,
                                 {Ptr->getType()}, {Ptr});
  if (MaybeAlign A = getAlign(Ptr)) {
    CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *A));
    CI->addRetAttr(Attribute::getWithAlignment(CI->getContext(), *A));
  }
  return CI;
}
CallInst *
IRBuilderBase::CreateAssumption(Value *Cond,
                                ArrayRef<OperandBundleDef> OpBundles) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = { Cond };
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  return CreateCall(FnAssume, Ops, OpBundles);
}

Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  Module *M = BB->getModule();
  auto *FnIntrinsic = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_noalias_scope_decl, {});
  return CreateCall(FnIntrinsic, {Scope});
}
/// Create a call to a Masked Load intrinsic.
/// \p Ty        - vector type to load
/// \p Ptr       - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = { Ty, PtrTy };
  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}

/// Create a call to a Masked Store intrinsic.
/// \p Val       - data to be stored,
/// \p Ptr       - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           Align Alignment, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}
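// Illustrative sketch (hypothetical Ptr and <4 x i1> Mask): masked-off lanes
// of the load come from the pass-through operand (poison when omitted).
//
//   auto *VTy = FixedVectorType::get(B.getInt32Ty(), 4);
//   Value *V = B.CreateMaskedLoad(VTy, Ptr, Align(16), Mask);
//   B.CreateMaskedStore(V, Ptr, Align(16), Mask);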
/// Create a call to a Masked intrinsic, with given intrinsic Id,
/// an array of operands - Ops, and an array of overloaded types -
/// OverloadedTypes.
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                               ArrayRef<Value *> Ops,
                                               ArrayRef<Type *> OverloadedTypes,
                                               const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
  return CreateCall(TheFn, Ops, {}, Name);
}
/// Create a call to a Masked Gather intrinsic.
/// \p Ty       - vector type to gather
/// \p Ptrs     - vector of pointers for loading
/// \p Align    - alignment for one element
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
                                            Align Alignment, Value *Mask,
                                            Value *PassThru,
                                            const Twine &Name) {
  auto *VecTy = cast<VectorType>(Ty);
  ElementCount NumElts = VecTy->getElementCount();
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");

  if (!Mask)
    Mask = getAllOnesMask(NumElts);

  if (!PassThru)
    PassThru = PoisonValue::get(Ty);

  Type *OverloadedTypes[] = {Ty, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
                               Name);
}

/// Create a call to a Masked Scatter intrinsic.
/// \p Data  - data to be stored,
/// \p Ptrs  - the vector of pointers, where the \p Data elements should be
///            stored
/// \p Align - alignment for one element
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());
  ElementCount NumElts = PtrsTy->getElementCount();

  if (!Mask)
    Mask = getAllOnesMask(NumElts);

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}
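// Illustrative sketch (hypothetical Ptrs of type <4 x ptr>): gather/scatter
// take a vector of addresses instead of one base pointer; a null mask means
// all lanes are active.
//
//   auto *VTy = FixedVectorType::get(B.getInt32Ty(), 4);
//   Value *V = B.CreateMaskedGather(VTy, Ptrs, Align(4));
//   B.CreateMaskedScatter(V, Ptrs, Align(4));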
/// Create a call to Masked Expand Load intrinsic
/// \p Ty        - vector type to load
/// \p Ptr       - base pointer for the load
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
                                                Value *Mask, Value *PassThru,
                                                const Twine &Name) {
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = {Ty};
  Value *Ops[] = {Ptr, Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
                               OverloadedTypes, Name);
}

/// Create a call to Masked Compress Store intrinsic
/// \p Val  - data to be stored,
/// \p Ptr  - base pointer for the store
/// \p Mask - vector of booleans which indicates what vector lanes should
///           be accessed in memory
CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr,
                                                   Value *Mask) {
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy};
  Value *Ops[] = {Val, Ptr, Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
                               OverloadedTypes);
}
template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle.
  return Args;
}
template<typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,
                     std::optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value *, 16> DeoptValues;
    llvm::append_range(DeoptValues, *DeoptArgs);
    Rval.emplace_back("deopt", DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value *, 16> TransitionValues;
    llvm::append_range(TransitionValues, *TransitionArgs);
    Rval.emplace_back("gc-transition", TransitionValues);
  }
  if (GCArgs.size()) {
    SmallVector<Value *, 16> LiveValues;
    llvm::append_range(LiveValues, GCArgs);
    Rval.emplace_back("gc-live", LiveValues);
  }
  return Rval;
}
template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualCallee.getCallee()->getType()});

  std::vector<Value *> Args = getStatepointArgs(
      *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);

  CallInst *CI = Builder->CreateCall(
      FnStatepoint, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  CI->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualCallee.getFunctionType()));
  return CI;
}
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Value *> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    uint32_t Flags, ArrayRef<Value *> CallArgs,
    std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Use> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
}
template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualInvokee, BasicBlock *NormalDest,
    BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualInvokee.getCallee()->getType()});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
                        Flags, InvokeArgs);

  InvokeInst *II = Builder->CreateInvoke(
      FnStatepoint, NormalDest, UnwindDest, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  II->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualInvokee.getFunctionType()));
  return II;
}
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs,
      std::nullopt /* No Transition Args*/, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    std::optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, std::nullopt, DeoptArgs,
      GCArgs, Name);
}
CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                        Type *ResultType, const Twine &Name) {
  Intrinsic::ID ID = Intrinsic::experimental_gc_result;
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);

  Value *Args[] = {Statepoint};
  return CreateCall(FnGCResult, Args, {}, Name);
}

CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset, int DerivedOffset,
                                          Type *ResultType, const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCRelocate =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);

  Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
  return CreateCall(FnGCRelocate, Args, {}, Name);
}
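// Illustrative sketch (hypothetical i32-returning callee Fn and argument X):
// a statepoint wraps the call, and gc.result reads the callee's return value
// back out of the statepoint token.
//
//   CallInst *SP = B.CreateGCStatepointCall(
//       /*ID=*/0, /*NumPatchBytes=*/0, Fn, /*CallArgs=*/{X},
//       /*DeoptArgs=*/std::nullopt, /*GCArgs=*/{}, "sp");
//   Value *Ret = B.CreateGCResult(SP, B.getInt32Ty(), "ret");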
CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCFindBase = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
  return CreateCall(FnGCFindBase, {DerivedPtr}, {}, Name);
}

CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                  const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCGetOffset = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
  return CreateCall(FnGCGetOffset, {DerivedPtr}, {}, Name);
}
CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              Instruction *FMFSource,
                                              const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
  return createCallHelper(Fn, {V}, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                               Value *RHS,
                                               Instruction *FMFSource,
                                               const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
  return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
}
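// Illustrative sketch (hypothetical float values X and Y): these helpers
// cover intrinsics overloaded only on their operand type.
//
//   Value *S = B.CreateUnaryIntrinsic(Intrinsic::sqrt, X);
//   Value *M = B.CreateBinaryIntrinsic(Intrinsic::maxnum, X, Y);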
CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
  return createCallHelper(Fn, Args, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();

  SmallVector<Intrinsic::IITDescriptor> Table;
  Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef(Table);

  SmallVector<Type *> ArgTys;
  ArgTys.reserve(Args.size());
  for (auto &I : Args)
    ArgTys.push_back(I->getType());
  FunctionType *FTy = FunctionType::get(RetTy, ArgTys, false);
  SmallVector<Type *> OverloadTys;
  Intrinsic::MatchIntrinsicTypesResult Res =
      matchIntrinsicSignature(FTy, TableRef, OverloadTys);
  (void)Res;
  assert(Res == Intrinsic::MatchIntrinsicTypes_Match && TableRef.empty() &&
         "Wrong types for intrinsic!");
  // TODO: Handle varargs intrinsics.

  Function *Fn = Intrinsic::getDeclaration(M, ID, OverloadTys);
  return createCallHelper(Fn, Args, Name, FMFSource);
}
CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
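// Illustrative sketch (hypothetical double values L and R): once the builder
// is put in strict-FP mode, ordinary CreateFAdd calls are routed through
// this function and emit llvm.experimental.constrained.fadd with explicit
// rounding and exception-behavior operands.
//
//   B.setIsFPConstrained(true);
//   Value *S = B.CreateFAdd(L, R); // constrained fadd under the hood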
CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C =
      CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
                       Ops[0], Ops[1], Name, FPMathTag);
  }
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
                      Ops[0], Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
}
CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;
  bool HasRoundingMD = false;
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
Value *IRBuilderBase::CreateFCmpHelper(
    CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
    MDNode *FPMathTag, bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateFCmp(P, LC, RC), Name);
  return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}

CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, std::optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, PredicateV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  append_range(UseArgs, Args);
  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  if (auto *V = Folder.FoldSelect(C, True, False))
    return V;

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
  return Insert(Sel, Name);
}
Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  Value *Difference = CreateSub(LHS_int, RHS_int);
  return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy),
                         Name);
}
Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  auto *PtrType = Ptr->getType();
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::launder_invariant_group, {PtrType});

  assert(FnLaunderInvariantGroup->getReturnType() == PtrType &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "LaunderInvariantGroup should take and return the same type");

  return CreateCall(FnLaunderInvariantGroup, {Ptr});
}

Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  auto *PtrType = Ptr->getType();
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::strip_invariant_group, {PtrType});

  assert(FnStripInvariantGroup->getReturnType() == PtrType &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "StripInvariantGroup should take and return the same type");

  return CreateCall(FnStripInvariantGroup, {Ptr});
}
Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  auto *Ty = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(Ty)) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_reverse, Ty);
    return Insert(CallInst::Create(F, V), Name);
  }
  // Keep the original behaviour for fixed vector
  SmallVector<int, 8> ShuffleMask;
  int NumElts = Ty->getElementCount().getKnownMinValue();
  for (int i = 0; i < NumElts; ++i)
    ShuffleMask.push_back(NumElts - i - 1);
  return CreateShuffleVector(V, ShuffleMask, Name);
}

Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                                         const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(Imm)};
    return Insert(CallInst::Create(F, Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
         "Invalid immediate for vector splice!");

  // Keep the original behaviour for fixed vector
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);

  return CreateShuffleVector(V1, V2, Mask);
}
Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  auto EC = ElementCount::getFixed(NumElts);
  return CreateVectorSplat(EC, V, Name);
}

Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  // First insert it into a poison vector so we can shuffle it.
  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  V = CreateInsertElement(Poison, V, getInt64(0), Name + ".splatinsert");

  // Shuffle the value across the desired number of elements.
  SmallVector<int, 16> Zeros;
  Zeros.resize(EC.getKnownMinValue());
  return CreateShuffleVector(V, Zeros, Name + ".splat");
}
Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index.");

  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType = GetElementPtrInst::getGEPReturnType(Base, IdxList);

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index.");

  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(Base, {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
                            {Base, GEPIndex, DIIndex});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}
Value *IRBuilderBase::createIsFPClass(Value *FPNum, unsigned Test) {
  ConstantInt *TestV = getInt32(Test);
  Module *M = BB->getParent()->getParent();
  Function *FnIsFPClass =
      Intrinsic::getDeclaration(M, Intrinsic::is_fpclass, {FPNum->getType()});
  return CreateCall(FnIsFPClass, {FPNum, TestV});
}
CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                                         Value *PtrValue,
                                                         Value *AlignValue,
                                                         Value *OffsetValue) {
  SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
  if (OffsetValue)
    Vals.push_back(OffsetValue);
  OperandBundleDefT<Value *> AlignOpB("align", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   unsigned Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}
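// Illustrative sketch (hypothetical pointer P and DataLayout DL): the
// assumption is encoded as llvm.assume(true) carrying an "align" operand
// bundle on P.
//
//   B.CreateAlignmentAssumption(DL, P, /*Alignment=*/64);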
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
IRBuilderFolder::~IRBuilderFolder() = default;
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}